var/home/core/zuul-output/logs/kubelet.log
Nov 26 05:25:40 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 26 05:25:40 crc restorecon[4680]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 26 05:25:40 crc restorecon[4680]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:40 crc restorecon[4680]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 05:25:40 crc 
restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 05:25:40 crc restorecon[4680]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 05:25:40 crc restorecon[4680]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 05:25:40 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 05:25:41 crc 
restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 
05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 05:25:41 crc 
restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 
05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 
05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 05:25:41 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:41 crc restorecon[4680]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 05:25:41 crc restorecon[4680]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 26 05:25:42 crc kubenswrapper[4871]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 05:25:42 crc kubenswrapper[4871]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 26 05:25:42 crc kubenswrapper[4871]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 05:25:42 crc kubenswrapper[4871]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 05:25:42 crc kubenswrapper[4871]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 26 05:25:42 crc kubenswrapper[4871]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.238683    4871 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246418    4871 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246452    4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246463    4871 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246472    4871 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246481    4871 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246489    4871 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246498    4871 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246506    4871 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246513    4871 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246521    4871 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246555    4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246563    4871 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246573    4871 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246584    4871 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246594 4871 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246602 4871 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246617 4871 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246625 4871 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246633 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246642 4871 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246651 4871 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246660 4871 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246668 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246676 4871 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246685 4871 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246693 4871 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246700 4871 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246707 4871 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246717 4871 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246725 4871 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246733 4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246740 4871 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246748 4871 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246755 4871 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246763 4871 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246771 4871 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246779 4871 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246786 4871 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246818 4871 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246826 4871 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246835 4871 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246844 4871 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246851 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246859 4871 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246867 4871 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246874 4871 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246882 4871 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246889 4871 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246897 4871 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246904 4871 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246913 4871 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246922 4871 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246929 4871 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246937 4871 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246949 4871 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246959 4871 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246968 4871 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246976 4871 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246984 4871 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246991 4871 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.246999 4871 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.247006 4871 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.247015 4871 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.247022 4871 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.247030 4871 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.247037 4871 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.247044 4871 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.247052 4871 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.247060 4871 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.247067 4871 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.247076 4871 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248023 4871 flags.go:64] FLAG: --address="0.0.0.0"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248049 4871 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248066 4871 flags.go:64] FLAG: --anonymous-auth="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248077 4871 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248088 4871 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248097 4871 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248109 4871 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248120 4871 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248129 4871 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248138 4871 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248148 4871 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248158 4871 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248168 4871 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248177 4871 flags.go:64] FLAG: --cgroup-root=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248186 4871 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248195 4871 flags.go:64] FLAG: --client-ca-file=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248204 4871 flags.go:64] FLAG: --cloud-config=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248213 4871 flags.go:64] FLAG: --cloud-provider=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248222 4871 flags.go:64] FLAG: --cluster-dns="[]"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248232 4871 flags.go:64] FLAG: --cluster-domain=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248241 4871 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248250 4871 flags.go:64] FLAG: --config-dir=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248259 4871 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248269 4871 flags.go:64] FLAG: --container-log-max-files="5"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248280 4871 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248288 4871 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248298 4871 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248308 4871 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248317 4871 flags.go:64] FLAG: --contention-profiling="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248327 4871 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248335 4871 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248345 4871 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248354 4871 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248365 4871 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248374 4871 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248384 4871 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248393 4871 flags.go:64] FLAG: --enable-load-reader="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248402 4871 flags.go:64] FLAG: --enable-server="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248411 4871 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248422 4871 flags.go:64] FLAG: --event-burst="100"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248431 4871 flags.go:64] FLAG: --event-qps="50"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248441 4871 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248449 4871 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248460 4871 flags.go:64] FLAG: --eviction-hard=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248470 4871 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248478 4871 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248487 4871 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248496 4871 flags.go:64] FLAG: --eviction-soft=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248506 4871 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248514 4871 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248523 4871 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248559 4871 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248568 4871 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248577 4871 flags.go:64] FLAG: --fail-swap-on="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248586 4871 flags.go:64] FLAG: --feature-gates=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248596 4871 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248605 4871 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248614 4871 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248623 4871 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248632 4871 flags.go:64] FLAG: --healthz-port="10248"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248642 4871 flags.go:64] FLAG: --help="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248651 4871 flags.go:64] FLAG: --hostname-override=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248659 4871 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248668 4871 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248678 4871 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248686 4871 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248695 4871 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248703 4871 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248712 4871 flags.go:64] FLAG: --image-service-endpoint=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248721 4871 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248730 4871 flags.go:64] FLAG: --kube-api-burst="100"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248740 4871 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248749 4871 flags.go:64] FLAG: --kube-api-qps="50"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248758 4871 flags.go:64] FLAG: --kube-reserved=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248768 4871 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248777 4871 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248786 4871 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248794 4871 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248803 4871 flags.go:64] FLAG: --lock-file=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248811 4871 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248820 4871 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248829 4871 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248842 4871 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248851 4871 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248860 4871 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248869 4871 flags.go:64] FLAG: --logging-format="text"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248878 4871 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248887 4871 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248895 4871 flags.go:64] FLAG: --manifest-url=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248904 4871 flags.go:64] FLAG: --manifest-url-header=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248917 4871 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248926 4871 flags.go:64] FLAG: --max-open-files="1000000"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248937 4871 flags.go:64] FLAG: --max-pods="110"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248948 4871 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248957 4871 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248966 4871 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248975 4871 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248984 4871 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.248993 4871 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249002 4871 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249021 4871 flags.go:64] FLAG: --node-status-max-images="50"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249030 4871 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249039 4871 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249048 4871 flags.go:64] FLAG: --pod-cidr=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249056 4871 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249069 4871 flags.go:64] FLAG: --pod-manifest-path=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249078 4871 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249089 4871 flags.go:64] FLAG: --pods-per-core="0"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249098 4871 flags.go:64] FLAG: --port="10250"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249107 4871 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249116 4871 flags.go:64] FLAG: --provider-id=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249125 4871 flags.go:64] FLAG: --qos-reserved=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249134 4871 flags.go:64] FLAG: --read-only-port="10255"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249143 4871 flags.go:64] FLAG: --register-node="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249151 4871 flags.go:64] FLAG: --register-schedulable="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249160 4871 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249174 4871 flags.go:64] FLAG: --registry-burst="10"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249183 4871 flags.go:64] FLAG: --registry-qps="5"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249191 4871 flags.go:64] FLAG: --reserved-cpus=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249200 4871 flags.go:64] FLAG: --reserved-memory=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249210 4871 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249219 4871 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249229 4871 flags.go:64] FLAG: --rotate-certificates="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249238 4871 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249246 4871 flags.go:64] FLAG: --runonce="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249255 4871 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249274 4871 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249283 4871 flags.go:64] FLAG: --seccomp-default="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249292 4871 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249301 4871 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249310 4871 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249319 4871 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249328 4871 flags.go:64] FLAG: --storage-driver-password="root"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249336 4871 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249345 4871 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249354 4871 flags.go:64] FLAG: --storage-driver-user="root"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249363 4871 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249372 4871 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249381 4871 flags.go:64] FLAG: --system-cgroups=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249390 4871 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249404 4871 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249413 4871 flags.go:64] FLAG: --tls-cert-file=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249422 4871 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249433 4871 flags.go:64] FLAG: --tls-min-version=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249441 4871 flags.go:64] FLAG: --tls-private-key-file=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249450 4871 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249459 4871 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249468 4871 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249477 4871 flags.go:64] FLAG: --v="2"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249496 4871 flags.go:64] FLAG: --version="false"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249507 4871 flags.go:64] FLAG: --vmodule=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249520 4871 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.249553 4871 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249758 4871 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249770 4871 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249779 4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249787 4871 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249797 4871 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249807 4871 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249816 4871 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249825 4871 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249833 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249841 4871 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249850 4871 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249859 4871 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249868 4871 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249877 4871 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249884 4871 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249892 4871 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249900 4871 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249907 4871 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249915 4871 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249922 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249930 4871 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249938 4871 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249948 4871 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249958 4871 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249967 4871 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249976 4871 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249985 4871 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.249994 4871 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250003 4871 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250011 4871 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250019 4871 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250028 4871 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250036 4871 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250046 4871 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250055 4871 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250064 4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250073 4871 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250081 4871 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250089 4871 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250096 4871 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250104 4871 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250112 4871 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250120 4871 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250128 4871 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250135 4871 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250143 4871 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250151 4871 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250158 4871 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250166 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250178 4871 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250185 4871 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250193 4871 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250201 4871 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250208 4871 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250216 4871 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250223 4871 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250231 4871 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250239 4871 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250247 4871 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250254 4871 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250261 4871 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250269 4871 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250278 4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250286 4871 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250293 4871 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250301 4871 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250308 4871 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250316 4871 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250324 4871 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250332 4871 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.250339 4871 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.250362 4871 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.263223 4871 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.263279 4871 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263384 4871 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263394 4871 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263399 4871 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263404 4871 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263410 4871 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263416 4871 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263422 4871 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263428 4871 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263433 4871 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263437 4871 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263441 4871 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263445 4871 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263450 4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263456 4871 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263465 4871 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263468 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263472 4871 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263476 4871 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263480 4871 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263486 4871 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263491 4871 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263496 4871 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263500 4871 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263505 4871 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263511 4871 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263515 4871 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263519 4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263536 4871 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263540 4871 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263544 4871 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263553 4871 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263557 4871 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263561 4871 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263566 4871 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263571 4871 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263575 4871 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263579 4871 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263582 4871 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263586 4871 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263590 4871 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263594 4871 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263598 4871 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263602 4871 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263606 4871 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263611 4871 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263615 4871 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263619 4871 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263623 4871 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263627 4871 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263631 4871 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263636 4871 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263641 4871 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263646 4871 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263651 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263656 4871 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263661 4871 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263665 4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263669 4871 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263674 4871 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263680 4871 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263686 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263691 4871 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263695 4871 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263700 4871 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263704 4871 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263709 4871 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263713 4871 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263718 4871 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263722 4871 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263726 4871 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263729 4871 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.263739 4871 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263902 4871 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263912 4871 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263916 4871 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263921 4871 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263926 4871 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263932 4871 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263936 4871 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263941 4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263945 4871 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263949 4871 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263953 4871 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263957 4871 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263961 4871 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263965 4871 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263969 4871 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263973 4871 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263977 4871 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263981 4871 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263985 4871 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263989 4871 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263993 4871 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.263999 4871 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264003 4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264007 4871 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264010 4871 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264014 4871 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264018 4871 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264022 4871 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264028 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264032 4871 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264036 4871 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264040 4871 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264044 4871 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264047 4871 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264051 4871 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264055 4871 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264058 4871 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264064 4871 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264068 4871 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264073 4871 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264077 4871 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264080 4871 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264084 4871 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264087 4871 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264091 4871 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264094 4871 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264098 4871 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264103 4871 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264108 4871 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264113 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264116 4871 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264120 4871 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264124 4871 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264127 4871 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264131 4871 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264136 4871 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264140 4871 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264144 4871 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264148 4871 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264152 4871 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264155 4871 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264159 4871 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264163 4871 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264168 4871 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264172 4871 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264176 4871 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264179 4871 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264183 4871 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264187 4871 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264191 4871 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.264194 4871 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.264202 4871 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.264432 4871 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.269286 4871 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.269394 4871 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.271629 4871 server.go:997] "Starting client certificate rotation"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.271661 4871 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.272833 4871 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-21 21:28:39.16322264 +0000 UTC
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.273015 4871 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 616h2m56.890212893s for next certificate rotation
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.302066 4871 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.304068 4871 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.318916 4871 log.go:25] "Validated CRI v1 runtime API"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.359571 4871 log.go:25] "Validated CRI v1 image API"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.362043 4871 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.368418 4871 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-26-05-20-51-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.368471 4871 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.400707 4871 manager.go:217] Machine: {Timestamp:2025-11-26 05:25:42.397221778 +0000 UTC m=+0.580273404 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:f5747dac-e851-4efb-9d51-1bea82126d22 BootID:4a642753-bf13-4675-b42b-d7df47f40ffd Filesystems:[{Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:e3:81:4b Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:e3:81:4b Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:4a:1f:bf Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:71:c5:b5 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:12:a6:c6 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:5a:e2:7d Speed:-1 Mtu:1496} {Name:eth10 MacAddress:fe:b5:af:eb:00:58 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:22:46:e1:43:bc:23 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.401136 4871 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.401333 4871 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.407760 4871 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.409771 4871 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.409878 4871 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.410494 4871 topology_manager.go:138] "Creating topology manager with none policy"
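
The NodeConfig record above pins SystemReserved (200m CPU, 350Mi memory, 350Mi ephemeral-storage) and a memory.available hard-eviction threshold of 100Mi, with KubeReserved null. Under the standard node-allocatable formula (allocatable = capacity - kube-reserved - system-reserved - hard eviction threshold), the node should advertise roughly 30.9 GiB of allocatable memory out of the 33654128640-byte MemoryCapacity in the Machine record. A quick check of that arithmetic (a sketch; quantities parsed with k8s.io/apimachinery, numbers taken from the records above):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	capacity := int64(33654128640) // MemoryCapacity from the Machine record above
	systemReserved := resource.MustParse("350Mi").Value()
	evictionHard := resource.MustParse("100Mi").Value()
	kubeReserved := int64(0) // KubeReserved is null in this NodeConfig

	allocatable := capacity - kubeReserved - systemReserved - evictionHard
	fmt.Printf("allocatable memory: %d bytes (~%.2f GiB)\n",
		allocatable, float64(allocatable)/(1<<30))
	// 33654128640 - 367001600 - 104857600 = 33182269440 bytes, ~30.90 GiB
}
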
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.410518 4871 container_manager_linux.go:303] "Creating device plugin manager"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.411225 4871 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.411284 4871 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.412327 4871 state_mem.go:36] "Initialized new in-memory state store"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.412507 4871 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.417985 4871 kubelet.go:418] "Attempting to sync node with API server"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.418050 4871 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.418105 4871 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.418130 4871 kubelet.go:324] "Adding apiserver pod source"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.418150 4871 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.422654 4871 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.423556 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused
Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.423692 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError"
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.423557 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.423745 4871 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
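
The certificate records in this startup sequence show the kubelet's two managed pairs: the client pair (kubelet-client-current.pem) and the serving pair (kubelet-server-current.pem), both under /var/lib/kubelet/pki, each with a rotation deadline chosen well before expiry (client cert expires 2026-02-24, deadline 2025-12-21). That spacing is consistent with client-go's certificate manager picking a jittered deadline somewhere in roughly the 70-90% band of the certificate's validity window; a sketch of that computation (the exact band is an assumption about this client-go vintage, and the notBefore value below is hypothetical since the log does not show it):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRotationDeadline mimics the jittered deadline: a uniformly random
// point in [70%, 90%] of the certificate's validity window.
func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notBefore := time.Date(2025, 2, 24, 5, 52, 8, 0, time.UTC) // hypothetical issue time
	notAfter := time.Date(2026, 2, 24, 5, 52, 8, 0, time.UTC)  // expiry from the log
	fmt.Println(nextRotationDeadline(notBefore, notAfter))     // lands in Nov 2025 - Jan 2026
}

The logged deadline of 2025-12-21 falls inside that band for a one-year certificate, and the "Waiting 616h2m56s" record is simply the time remaining until it.
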
Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.423837 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.426409 4871 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428093 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428145 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428165 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428182 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428213 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428231 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428248 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428276 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428297 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428316 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428338 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.428355 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.429318 4871 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.430425 4871 server.go:1280] "Started kubelet"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.430574 4871 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.431501 4871 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.431507 4871 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.432386 4871 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 26 05:25:42 crc systemd[1]: Started Kubernetes Kubelet.
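
The reflector and CSINode failures above all trace to the same cause: the kubelet starts before the API server at https://api-int.crc.testing:6443 is accepting connections, so every initial list/watch gets connection refused and the reflectors retry with backoff until the control plane comes up. For reference, a minimal client-go sketch issuing the same kind of filtered Node list that the reflector performs (the kubeconfig path is an assumption about this node's layout; the node name crc and field selector are taken from the log, and the error branch stands in for the reflector's retry loop):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Same shape as the reflector's initial list: nodes filtered to this node.
	nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
		FieldSelector: "metadata.name=crc",
	})
	if err != nil {
		fmt.Println("list failed (the kubelet's reflector would retry):", err)
		return
	}
	fmt.Println("nodes:", len(nodes.Items))
}
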
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.434183 4871 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.434243 4871 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.435301 4871 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.435328 4871 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.435472 4871 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.435569 4871 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.434741 4871 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 17:23:33.471371205 +0000 UTC
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.435802 4871 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1187h57m51.035580143s for next certificate rotation
Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.436601 4871 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused" interval="200ms"
Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.436767 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused
Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.436859 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.438364 4871 server.go:460] "Adding debug handlers to kubelet server"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.436693 4871 factory.go:55] Registering systemd factory
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.441373 4871 factory.go:221] Registration of the systemd container factory successfully
Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.439319 4871 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.44:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b7733927cef24 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 05:25:42.430379812 +0000 UTC m=+0.613431438,LastTimestamp:2025-11-26 05:25:42.430379812 +0000 UTC m=+0.613431438,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452251 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452347 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452371 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452395 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452417 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452437 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452458 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452478 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452502 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452523 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452574 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452595 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452617 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452642 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452662 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452681 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452701 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452722 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452742 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452763 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452782 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452802 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452820 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452844 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452865 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452890 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452916 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452938 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452959 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.452981 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453002 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453022 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453052 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453076 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453101 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453127 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453159 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453183 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453202 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453223 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453244 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453263 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453285 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453306 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453329 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453350 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453370 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453392 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453416 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453437 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453456 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453476 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453503 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453553 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453577 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453598 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453619 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453640 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453658 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453678 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453698 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453719 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453738 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453759 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453779 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453800 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453820 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453842 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453865 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453884 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453904 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453924 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453944 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453963 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.453983 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454002 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454022 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454043 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454064 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454083 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454103 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454122 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454141 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454162 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454183 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454202 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454222 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454243 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454262 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454282 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454283 4871 factory.go:153] Registering CRI-O factory
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454336 4871 factory.go:221] Registration of the crio container factory successfully
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454480 4871 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454558 4871 factory.go:103] Registering Raw factory
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454599 4871 manager.go:1196] Started watching for new ooms in manager
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.455765 4871 manager.go:319] Starting recovery of all containers
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.454303 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.458800 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.459041 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.459239 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.459369 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.459483 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.459630 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.459747 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.459990 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.460135 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.460321 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.460499 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.460716 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.460932 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.465886 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466600 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466673 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466696 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466741 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466764 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466802 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466819 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466839 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466874 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466891 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466935 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466949 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466964 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.466980 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467028 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467043 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467059 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467078 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467114 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467130 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467143 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467156 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467189 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467202 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467215 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467255 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467289 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467304 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467317 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467339 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467353 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467366 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467445 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467481 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467497 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13"
volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467512 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467568 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467584 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467597 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467627 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467665 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467679 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467695 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467708 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467723 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467739 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" 
volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467752 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467788 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467802 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467815 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467855 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467870 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467883 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467916 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467930 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467942 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467973 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" 
volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.467989 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.468024 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.468042 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.468082 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.468119 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.468142 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.468162 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.468951 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.468973 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.468989 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.474844 4871 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" 
volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.474953 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.474976 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.474990 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475006 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475020 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475034 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475050 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475064 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475077 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475090 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475103 4871 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475119 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475132 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475146 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475158 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475172 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475185 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475198 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475212 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475227 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475242 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475256 4871 reconstruct.go:130] "Volume is marked as uncertain 
and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475269 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475296 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475308 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475321 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475335 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475348 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475361 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475374 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475387 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475401 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475414 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475427 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475439 4871 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475452 4871 reconstruct.go:97] "Volume reconstruction finished" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.475462 4871 reconciler.go:26] "Reconciler: start to sync state" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.486544 4871 manager.go:324] Recovery completed Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.497685 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.500288 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.500359 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.500382 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.501581 4871 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.501813 4871 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.502011 4871 state_mem.go:36] "Initialized new in-memory state store" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.502843 4871 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.505928 4871 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.505997 4871 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.506052 4871 kubelet.go:2335] "Starting kubelet main sync loop" Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.506280 4871 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 26 05:25:42 crc kubenswrapper[4871]: W1126 05:25:42.508935 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.509034 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.523842 4871 policy_none.go:49] "None policy: Start" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.524694 4871 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.524733 4871 state_mem.go:35] "Initializing new in-memory state store" Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.536357 4871 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.592777 4871 manager.go:334] "Starting Device Plugin manager" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.593204 4871 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.593223 4871 server.go:79] "Starting device plugin registration server" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.593774 4871 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.593797 4871 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.594119 4871 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.594241 4871 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.594255 4871 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.604959 4871 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.607311 4871 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 05:25:42 crc kubenswrapper[4871]: 
I1126 05:25:42.607442 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.609113 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.609155 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.609191 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.609422 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.609672 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.609745 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.610578 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.610753 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.610836 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.610920 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.610933 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.611158 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.611358 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.611598 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.611676 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.612435 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.612457 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.612468 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.612587 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.612748 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.612806 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.613155 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.613196 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.613216 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.613279 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.613303 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.613317 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.613481 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.613650 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.613700 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.614609 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.614636 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.614648 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.614652 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.614696 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.614720 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.614855 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.614871 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.614882 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.615094 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.615122 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.615944 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.615974 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.615985 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.637605 4871 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused" interval="400ms" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677313 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677365 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677461 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677565 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677615 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677652 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677676 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" 
(UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677773 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677839 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677890 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677928 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677960 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.677990 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.678021 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.678090 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.694763 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.696218 4871 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.696391 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.696521 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.696695 4871 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.697426 4871 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.44:6443: connect: connection refused" node="crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779163 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779221 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779254 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779430 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779453 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779594 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779655 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779687 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" 
(UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779754 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779820 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779819 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779855 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779922 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.779986 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780055 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780169 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780237 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780324 4871 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780267 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780388 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780423 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780451 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780515 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780634 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780692 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780754 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780834 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780811 4871 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780874 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.780956 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.898486 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.900494 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.900574 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.900593 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.900629 4871 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 05:25:42 crc kubenswrapper[4871]: E1126 05:25:42.901351 4871 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.44:6443: connect: connection refused" node="crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.947560 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.964591 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 05:25:42 crc kubenswrapper[4871]: I1126 05:25:42.981290 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 26 05:25:43 crc kubenswrapper[4871]: W1126 05:25:43.007504 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-69d67bccd1414617a37855c3cd27620b9aa8df0f310b755d95c208b1e3b851e6 WatchSource:0}: Error finding container 69d67bccd1414617a37855c3cd27620b9aa8df0f310b755d95c208b1e3b851e6: Status 404 returned error can't find the container with id 69d67bccd1414617a37855c3cd27620b9aa8df0f310b755d95c208b1e3b851e6 Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.009561 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 05:25:43 crc kubenswrapper[4871]: W1126 05:25:43.011483 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-95ca4335ae4a6b25fe3d0d7cf1602e3a2501b02cdbb33d29d8a2b8c4274c505d WatchSource:0}: Error finding container 95ca4335ae4a6b25fe3d0d7cf1602e3a2501b02cdbb33d29d8a2b8c4274c505d: Status 404 returned error can't find the container with id 95ca4335ae4a6b25fe3d0d7cf1602e3a2501b02cdbb33d29d8a2b8c4274c505d Nov 26 05:25:43 crc kubenswrapper[4871]: W1126 05:25:43.015788 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-703d2f7f6089c3af76d14414e8a5ac68e44d31cce645115a00ec48230908debd WatchSource:0}: Error finding container 703d2f7f6089c3af76d14414e8a5ac68e44d31cce645115a00ec48230908debd: Status 404 returned error can't find the container with id 703d2f7f6089c3af76d14414e8a5ac68e44d31cce645115a00ec48230908debd Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.020113 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:43 crc kubenswrapper[4871]: W1126 05:25:43.034716 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-7b644e431efc97e8949f7b5acf6600baacf8e497cbb5385127272dd87bb323f9 WatchSource:0}: Error finding container 7b644e431efc97e8949f7b5acf6600baacf8e497cbb5385127272dd87bb323f9: Status 404 returned error can't find the container with id 7b644e431efc97e8949f7b5acf6600baacf8e497cbb5385127272dd87bb323f9 Nov 26 05:25:43 crc kubenswrapper[4871]: E1126 05:25:43.038754 4871 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused" interval="800ms" Nov 26 05:25:43 crc kubenswrapper[4871]: W1126 05:25:43.059364 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-e37080175ee2df3ed44f9336d89b297b597944f49e87c99554a7327376df709c WatchSource:0}: Error finding container e37080175ee2df3ed44f9336d89b297b597944f49e87c99554a7327376df709c: Status 404 returned error can't find the container with id e37080175ee2df3ed44f9336d89b297b597944f49e87c99554a7327376df709c Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.301648 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.303723 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.303795 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.303816 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.303853 4871 kubelet_node_status.go:76] "Attempting to register node" 
node="crc" Nov 26 05:25:43 crc kubenswrapper[4871]: E1126 05:25:43.304830 4871 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.44:6443: connect: connection refused" node="crc" Nov 26 05:25:43 crc kubenswrapper[4871]: W1126 05:25:43.320770 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:43 crc kubenswrapper[4871]: E1126 05:25:43.320882 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError" Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.432205 4871 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:43 crc kubenswrapper[4871]: W1126 05:25:43.477324 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:43 crc kubenswrapper[4871]: E1126 05:25:43.477450 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError" Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.515015 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"7b644e431efc97e8949f7b5acf6600baacf8e497cbb5385127272dd87bb323f9"} Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.516777 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"703d2f7f6089c3af76d14414e8a5ac68e44d31cce645115a00ec48230908debd"} Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.517916 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"95ca4335ae4a6b25fe3d0d7cf1602e3a2501b02cdbb33d29d8a2b8c4274c505d"} Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.519139 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"69d67bccd1414617a37855c3cd27620b9aa8df0f310b755d95c208b1e3b851e6"} Nov 26 05:25:43 crc kubenswrapper[4871]: I1126 05:25:43.520178 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e37080175ee2df3ed44f9336d89b297b597944f49e87c99554a7327376df709c"} Nov 26 05:25:43 crc kubenswrapper[4871]: W1126 05:25:43.638334 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:43 crc kubenswrapper[4871]: E1126 05:25:43.639089 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError" Nov 26 05:25:43 crc kubenswrapper[4871]: W1126 05:25:43.817818 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:43 crc kubenswrapper[4871]: E1126 05:25:43.817976 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError" Nov 26 05:25:43 crc kubenswrapper[4871]: E1126 05:25:43.840587 4871 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused" interval="1.6s" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.105792 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.107389 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.107427 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.107678 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.107726 4871 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 05:25:44 crc kubenswrapper[4871]: E1126 05:25:44.108370 4871 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.44:6443: connect: connection refused" node="crc" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.431893 4871 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.528189 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144"} Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.528273 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc"} Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.528285 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.528304 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0"} Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.528349 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f"} Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.529911 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.529972 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.530000 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.531502 4871 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4" exitCode=0 Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.531653 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4"} Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.531699 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.533123 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.533184 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.533205 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.534579 4871 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c" exitCode=0 Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.534670 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c"} Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.534683 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.536336 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.536395 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.536425 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.536474 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.537908 4871 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="92ecf83f0c0a361243b52f7d057c68404fe1842b44caed636117b9ea5e956ef7" exitCode=0 Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.538022 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.538022 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"92ecf83f0c0a361243b52f7d057c68404fe1842b44caed636117b9ea5e956ef7"} Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.538072 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.538110 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.538142 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.539197 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.539240 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.539259 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.541881 4871 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246" exitCode=0 Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.541936 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246"} Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.542004 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.543064 4871 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.543097 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:44 crc kubenswrapper[4871]: I1126 05:25:44.543114 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:45 crc kubenswrapper[4871]: W1126 05:25:45.378697 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:45 crc kubenswrapper[4871]: E1126 05:25:45.378796 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.431511 4871 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:45 crc kubenswrapper[4871]: E1126 05:25:45.442059 4871 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused" interval="3.2s" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.549179 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"c147cae302ede8de5204573d8405e9aee2503d957606138e742af17dfd03f6fc"} Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.549274 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.550626 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.550672 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.550686 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.553101 4871 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="00bc579651cf8005d4ec57d66916b373599060eb33a5ebfb6de190a8402db294" exitCode=0 Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.553145 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"00bc579651cf8005d4ec57d66916b373599060eb33a5ebfb6de190a8402db294"} Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.553218 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" 
Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.554700 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.554741 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.554752 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.556939 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.556915 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6f55fc830fdd852727a8ac6714209b06ef8394a19d313752c316fd0901a47f2c"} Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.557051 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bb1e537a2837f366cb6a6343ffdcf998611f07d8c19f4fe9c0111862520ebbe5"} Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.557065 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"3e936d7790749be736341822bb370fc8729d1e006bffe538ff480a090b856cce"} Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.562748 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.562771 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.562779 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.565459 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438"} Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.565488 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.565493 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51"} Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.565517 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934"} Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.565558 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d"} Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.566437 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.566474 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.566485 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.709280 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.713029 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.713064 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.713078 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:45 crc kubenswrapper[4871]: I1126 05:25:45.713101 4871 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 05:25:45 crc kubenswrapper[4871]: E1126 05:25:45.713654 4871 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.44:6443: connect: connection refused" node="crc" Nov 26 05:25:45 crc kubenswrapper[4871]: W1126 05:25:45.797874 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:45 crc kubenswrapper[4871]: E1126 05:25:45.797964 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError" Nov 26 05:25:45 crc kubenswrapper[4871]: W1126 05:25:45.839708 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:45 crc kubenswrapper[4871]: E1126 05:25:45.839793 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError" Nov 26 05:25:45 crc kubenswrapper[4871]: W1126 05:25:45.965341 4871 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.44:6443: connect: connection refused Nov 26 05:25:45 crc 
kubenswrapper[4871]: E1126 05:25:45.965467 4871 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.44:6443: connect: connection refused" logger="UnhandledError" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.305449 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.571287 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea"} Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.571452 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.572699 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.572750 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.572768 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.573306 4871 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2c55ab22881a3d95186770df81c358e2240645c5c3135e06f7f1bc7eaabf05fc" exitCode=0 Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.573360 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2c55ab22881a3d95186770df81c358e2240645c5c3135e06f7f1bc7eaabf05fc"} Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.573421 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.574907 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.575666 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.578140 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.578181 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.578194 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.578278 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.578325 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.578348 4871 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.579001 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.579034 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.579046 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.892375 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.892520 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.893695 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.893725 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:46 crc kubenswrapper[4871]: I1126 05:25:46.893733 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.581357 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f6c16d05bc5c19f2ba70e823413e81783d27d44186c57b9b5a268e33419553cd"} Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.581423 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2928343a7cd135d16835e49258ecebc428ce67530421f5b965056a20e7050892"} Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.581449 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"975000d63bf189788268cf4963fcdfe7a6214e1a499d293eeee48bc109f91ab9"} Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.581470 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"090e03c19340014122c87c5df4854f8d48db29da95124ef7858c2a83e955eafa"} Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.581498 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.581677 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.581707 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.583012 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.583045 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 
05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.583055 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.583122 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.583169 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:47 crc kubenswrapper[4871]: I1126 05:25:47.583189 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.587750 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.588491 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.589069 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1dc3ef5b7c2b446658eebe4accb590ad7292700ed43f5f070bc215ed311019f2"} Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.589373 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.589420 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.589441 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.589780 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.589850 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.589874 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.914399 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.916453 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.916514 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.916558 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:48 crc kubenswrapper[4871]: I1126 05:25:48.916592 4871 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.140881 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.141066 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:49 crc 
kubenswrapper[4871]: I1126 05:25:49.141990 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.143131 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.143190 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.143216 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.154281 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.492648 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.592521 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.592730 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.592815 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.594766 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.594835 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.594862 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.594967 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.595008 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.595028 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.595444 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.595478 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.595507 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.658387 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.878330 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:49 crc 
kubenswrapper[4871]: I1126 05:25:49.892783 4871 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 26 05:25:49 crc kubenswrapper[4871]: I1126 05:25:49.892889 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.365033 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.595100 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.595154 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.595372 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.596925 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.596991 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.597023 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.597039 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.597097 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.597132 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.597152 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.596996 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:50 crc kubenswrapper[4871]: I1126 05:25:50.597247 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:51 crc kubenswrapper[4871]: I1126 05:25:51.597684 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:51 crc kubenswrapper[4871]: I1126 05:25:51.599003 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:51 crc kubenswrapper[4871]: I1126 05:25:51.599037 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:51 crc kubenswrapper[4871]: 
I1126 05:25:51.599048 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:52 crc kubenswrapper[4871]: I1126 05:25:52.280346 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 26 05:25:52 crc kubenswrapper[4871]: I1126 05:25:52.280639 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:52 crc kubenswrapper[4871]: I1126 05:25:52.282086 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:52 crc kubenswrapper[4871]: I1126 05:25:52.282182 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:52 crc kubenswrapper[4871]: I1126 05:25:52.282201 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:52 crc kubenswrapper[4871]: E1126 05:25:52.605290 4871 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 26 05:25:56 crc kubenswrapper[4871]: I1126 05:25:56.432825 4871 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 26 05:25:57 crc kubenswrapper[4871]: I1126 05:25:57.081925 4871 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 26 05:25:57 crc kubenswrapper[4871]: I1126 05:25:57.082038 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 26 05:25:57 crc kubenswrapper[4871]: I1126 05:25:57.086718 4871 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 26 05:25:57 crc kubenswrapper[4871]: I1126 05:25:57.086789 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.499045 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.499242 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.500741 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 
05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.500799 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.500820 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.670500 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.670757 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.672631 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.672693 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.672712 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.678630 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.893336 4871 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 26 05:25:59 crc kubenswrapper[4871]: I1126 05:25:59.893423 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.412768 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.412983 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.414314 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.414364 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.414386 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.432974 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.620921 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.621015 4871 kubelet_node_status.go:401] "Setting node 
annotation to enable volume controller attach/detach" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.622124 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.622184 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.622202 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.622206 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.622228 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:00 crc kubenswrapper[4871]: I1126 05:26:00.622342 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.066788 4871 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.070059 4871 trace.go:236] Trace[907440919]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 05:25:51.764) (total time: 10305ms): Nov 26 05:26:02 crc kubenswrapper[4871]: Trace[907440919]: ---"Objects listed" error: 10305ms (05:26:02.069) Nov 26 05:26:02 crc kubenswrapper[4871]: Trace[907440919]: [10.305101768s] [10.305101768s] END Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.070094 4871 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.070694 4871 trace.go:236] Trace[25940584]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 05:25:49.312) (total time: 12758ms): Nov 26 05:26:02 crc kubenswrapper[4871]: Trace[25940584]: ---"Objects listed" error: 12758ms (05:26:02.070) Nov 26 05:26:02 crc kubenswrapper[4871]: Trace[25940584]: [12.758356468s] [12.758356468s] END Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.070753 4871 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.072894 4871 trace.go:236] Trace[1228318046]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 05:25:49.672) (total time: 12400ms): Nov 26 05:26:02 crc kubenswrapper[4871]: Trace[1228318046]: ---"Objects listed" error: 12400ms (05:26:02.072) Nov 26 05:26:02 crc kubenswrapper[4871]: Trace[1228318046]: [12.400553222s] [12.400553222s] END Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.072932 4871 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.073241 4871 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.073505 4871 reconstruct.go:205] "DevicePaths of reconstructed volumes 
updated" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.074508 4871 trace.go:236] Trace[1538417303]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 05:25:51.161) (total time: 10912ms): Nov 26 05:26:02 crc kubenswrapper[4871]: Trace[1538417303]: ---"Objects listed" error: 10912ms (05:26:02.074) Nov 26 05:26:02 crc kubenswrapper[4871]: Trace[1538417303]: [10.912608063s] [10.912608063s] END Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.074573 4871 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.122370 4871 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33886->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.122451 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33886->192.168.126.11:17697: read: connection reset by peer" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.122457 4871 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33892->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.122573 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33892->192.168.126.11:17697: read: connection reset by peer" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.122947 4871 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.122989 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.431148 4871 apiserver.go:52] "Watching apiserver" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.434195 4871 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.434504 4871 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"] Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.434842 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.434939 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.434953 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.435038 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.435069 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.435367 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.436301 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.436348 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.436366 4871 util.go:30] "No sandbox for pod can be found. 
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.438108 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.438385 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.438712 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.440202 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.440379 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.440626 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.442378 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.442595 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.443515 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.472571 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.475788 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.475843 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.475880 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.475923 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.475955 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.475990 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.476021 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
\"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.476055 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.476087 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.476116 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.476147 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.476178 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.478365 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.478617 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.479639 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 05:26:02 crc 
kubenswrapper[4871]: I1126 05:26:02.480265 4871 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.484957 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.489132 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.489265 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.497427 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not 
Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.497454 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.497466 4871 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.497515 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:02.997498917 +0000 UTC m=+21.180550503 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.500635 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.502117 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.505312 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.505340 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.505356 4871 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.505423 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:03.005402966 +0000 UTC m=+21.188454662 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.508090 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.511291 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.516741 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.529038 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.536340 4871 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.538696 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.553571 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.563994 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.575070 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577439 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577489 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577549 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577583 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577616 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577648 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577678 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577711 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577805 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577840 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577882 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577884 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577917 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577952 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577987 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578019 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578051 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577938 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578078 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.577952 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578102 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578085 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578163 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578198 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578240 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578262 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578268 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578269 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578285 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578320 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578357 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578376 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578397 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578430 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578452 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578461 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578489 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578518 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578565 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). 
InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578573 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578574 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578635 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578644 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578664 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578689 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578756 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578804 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578839 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: 
\"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578841 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578862 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578885 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578887 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578906 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578929 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.578929 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579017 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579045 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579068 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579083 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579091 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579090 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579113 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579135 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579136 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579155 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579179 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579200 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579218 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579237 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579254 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.579264 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:26:03.079245025 +0000 UTC m=+21.262296721 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579291 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579322 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579346 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579368 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579392 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579449 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579472 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579494 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579560 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: 
\"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579581 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579598 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579618 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579638 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579649 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579509 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579660 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579716 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579737 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579754 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579734 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579821 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579848 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579874 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579897 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579908 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579919 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.579946 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580011 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580021 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580042 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580181 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580186 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580199 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580214 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580068 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580318 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580328 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580343 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580368 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580391 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580416 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580439 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580464 4871 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580485 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580506 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580546 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580568 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580593 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580615 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580635 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580683 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580706 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580733 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580777 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580805 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580835 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580862 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580903 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580939 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.580970 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581001 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581025 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581047 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" 
(UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581071 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581074 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581095 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581117 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581162 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581183 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581383 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581405 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581426 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581449 
4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581470 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581491 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581565 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581649 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.581954 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582003 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582027 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582036 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582106 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582144 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582179 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582187 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582221 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582272 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582309 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582345 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582377 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582411 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582418 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582448 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582482 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582554 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582592 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584270 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584309 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584344 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584379 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584415 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584450 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: 
\"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584483 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584517 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584574 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584630 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584667 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584701 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584735 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584770 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584802 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584838 4871 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584870 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584904 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584937 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584970 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.585001 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.585033 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.585066 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.588762 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590190 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 05:26:02 crc 
kubenswrapper[4871]: I1126 05:26:02.590252 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590417 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590469 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590563 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590627 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590794 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590845 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590897 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590941 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590980 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 05:26:02 crc 
kubenswrapper[4871]: I1126 05:26:02.591046 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591093 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591133 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591180 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591226 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591264 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591314 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591358 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591403 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591441 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 
05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591485 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591549 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591615 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591660 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591703 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591749 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591916 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591961 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591997 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592079 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod 
\"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592123 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592160 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592204 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592251 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592340 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592387 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592429 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592470 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.593078 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.593138 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: 
\"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.593182 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594007 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594065 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594099 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594142 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594214 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594250 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594400 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594499 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: 
\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594665 4871 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594683 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594699 4871 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594714 4871 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594733 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594747 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594764 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594784 4871 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594799 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594814 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594829 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594847 4871 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594865 4871 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") 
on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594879 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594893 4871 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594912 4871 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594926 4871 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594940 4871 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594964 4871 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594981 4871 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.596596 4871 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.596619 4871 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.596641 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.596655 4871 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.596671 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.598079 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.598999 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599040 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599066 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599082 4871 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599101 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599117 4871 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599286 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599137 4871 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599387 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599405 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599430 4871 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599445 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node 
\"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599461 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599475 4871 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599494 4871 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599509 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582552 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582649 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.602847 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582922 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582978 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.583005 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.583069 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.583637 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584274 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584238 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584363 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584612 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.584675 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.585109 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.589010 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.589446 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.589622 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590091 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590367 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590398 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.590796 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591068 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.603105 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591483 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591694 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.591833 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592167 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592178 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.582938 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592230 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592807 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.593236 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.592846 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.593459 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.593427 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.593722 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.593896 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594127 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594254 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594444 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.594974 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.595305 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.595334 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.595703 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.596254 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.596778 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.597257 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.597426 4871 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.597602 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.597837 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.598652 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.598808 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599114 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599023 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.599394 4871 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599481 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599829 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.599819 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.600118 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.600142 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.600217 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.600465 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.600509 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.600925 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.601043 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.601711 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.601816 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.601956 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.602097 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.602182 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.602340 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.602417 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.602403 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.603177 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.603261 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.603770 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.603874 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.603987 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.604152 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.604922 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.606158 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.606884 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.607085 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.607140 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.607854 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.608132 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.608676 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.609078 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.609405 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). 
InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.609961 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.611000 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.611641 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.611737 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.611887 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.611916 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.612108 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.614215 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). 
InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.614869 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.616652 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:03.116606135 +0000 UTC m=+21.299657741 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.616666 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.616776 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: E1126 05:26:02.616816 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:03.116791159 +0000 UTC m=+21.299842755 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.616946 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.617018 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.617079 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.617105 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.617165 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.617180 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.617378 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.617448 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.617463 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.617513 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.618411 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.618724 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.618946 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.619182 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.622133 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.622406 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.623074 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.623228 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.623292 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.623489 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.624101 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.624241 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.624245 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.624566 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.624992 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.625048 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.625125 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). 
InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.625294 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.619734 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.627548 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.627556 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.628011 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.628205 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.628230 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.631683 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.631921 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.632005 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.632056 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.633104 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.635178 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.638586 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.638601 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.638930 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.638983 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.641743 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.641766 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.642038 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.655966 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.655992 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.656096 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.656775 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.657045 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.661897 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.668966 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.670058 4871 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea" exitCode=255
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.670079 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea"}
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.675095 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.690238 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.692840 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.696054 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700664 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700689 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700698 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700708 4871 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700717 4871 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700727 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700736 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700745 4871 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700754 4871 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700763 4871 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700772 4871 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700781 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700789 4871 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 
05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700797 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700805 4871 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700812 4871 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700822 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700833 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700843 4871 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700853 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700864 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700872 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700880 4871 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700888 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700897 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700905 4871 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" 
Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700913 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700921 4871 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700930 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700941 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700949 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700957 4871 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700966 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700975 4871 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700982 4871 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.700993 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701003 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701011 4871 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701018 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: 
\"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701026 4871 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701035 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701043 4871 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701052 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701061 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701069 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701076 4871 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701084 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701092 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701100 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701107 4871 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701116 4871 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701124 4871 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701132 4871 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701140 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701149 4871 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701157 4871 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701165 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701173 4871 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701182 4871 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701190 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701200 4871 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701208 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701215 4871 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701224 4871 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701233 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: 
\"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701241 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701249 4871 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701257 4871 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701264 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701272 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701279 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701287 4871 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701296 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701304 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701311 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701320 4871 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701328 4871 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701337 4871 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701345 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701372 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701380 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701388 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701397 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701405 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701417 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701425 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701433 4871 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701441 4871 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701449 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701457 4871 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701465 4871 reconciler_common.go:293] "Volume detached for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701472 4871 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701480 4871 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701488 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701497 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701506 4871 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701514 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701539 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701548 4871 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701557 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701565 4871 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701574 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701582 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701590 4871 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701598 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701605 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701614 4871 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701621 4871 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701629 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701638 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701646 4871 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701654 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701662 4871 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701670 4871 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701677 4871 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701685 4871 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701695 4871 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701702 4871 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701711 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701719 4871 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701727 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701736 4871 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701745 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701754 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701762 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701771 4871 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701779 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701787 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701795 4871 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701803 4871 reconciler_common.go:293] "Volume detached for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701817 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701827 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701838 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701850 4871 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701860 4871 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701870 4871 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701880 4871 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701890 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701900 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701909 4871 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701917 4871 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701924 4871 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701931 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" 
DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701939 4871 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701946 4871 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701954 4871 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701962 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701969 4871 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701978 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701985 4871 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.701994 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.702002 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.703360 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.707137 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.708630 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.726181 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.750195 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.750364 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.759567 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 05:26:02 crc kubenswrapper[4871]: W1126 05:26:02.764906 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-61694ac6699f8d878f3b809c4a1d32cad84e211f9214e3f86cff78a62d9a7de7 WatchSource:0}: Error finding container 61694ac6699f8d878f3b809c4a1d32cad84e211f9214e3f86cff78a62d9a7de7: Status 404 returned error can't find the container with id 61694ac6699f8d878f3b809c4a1d32cad84e211f9214e3f86cff78a62d9a7de7 Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.767035 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.772093 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: W1126 05:26:02.774564 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-22acc6c33fa85f7c6c3b97adb9ff63be65a5b85e1052f3736b156a35e05753fc WatchSource:0}: Error finding container 22acc6c33fa85f7c6c3b97adb9ff63be65a5b85e1052f3736b156a35e05753fc: Status 404 returned error can't find the container with id 22acc6c33fa85f7c6c3b97adb9ff63be65a5b85e1052f3736b156a35e05753fc Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.780096 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.790415 4871 scope.go:117] "RemoveContainer" containerID="7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.803794 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.804771 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.804793 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.820443 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.842128 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.859141 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.958729 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-2jk6j"] Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.959103 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-2jk6j" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.962488 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.962515 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.963372 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.973954 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26
T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.984631 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:02 crc kubenswrapper[4871]: I1126 05:26:02.999652 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.007944 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.007993 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.008096 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.008125 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.008137 4871 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.008177 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:04.008163924 +0000 UTC m=+22.191215510 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.008224 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.008232 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.008239 4871 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.008257 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:04.008251267 +0000 UTC m=+22.191302853 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.011301 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.021347 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.034246 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.041846 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.047988 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.108594 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.108681 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/e5b85376-eda9-4770-ad55-b7a59a00e3f3-hosts-file\") pod \"node-resolver-2jk6j\" (UID: \"e5b85376-eda9-4770-ad55-b7a59a00e3f3\") " pod="openshift-dns/node-resolver-2jk6j" Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.108760 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:26:04.108729691 +0000 UTC m=+22.291781287 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.108838 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85lq9\" (UniqueName: \"kubernetes.io/projected/e5b85376-eda9-4770-ad55-b7a59a00e3f3-kube-api-access-85lq9\") pod \"node-resolver-2jk6j\" (UID: \"e5b85376-eda9-4770-ad55-b7a59a00e3f3\") " pod="openshift-dns/node-resolver-2jk6j" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.210024 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.210066 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85lq9\" (UniqueName: \"kubernetes.io/projected/e5b85376-eda9-4770-ad55-b7a59a00e3f3-kube-api-access-85lq9\") pod \"node-resolver-2jk6j\" (UID: \"e5b85376-eda9-4770-ad55-b7a59a00e3f3\") " pod="openshift-dns/node-resolver-2jk6j" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.210096 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.210124 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/e5b85376-eda9-4770-ad55-b7a59a00e3f3-hosts-file\") 
pod \"node-resolver-2jk6j\" (UID: \"e5b85376-eda9-4770-ad55-b7a59a00e3f3\") " pod="openshift-dns/node-resolver-2jk6j" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.210201 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/e5b85376-eda9-4770-ad55-b7a59a00e3f3-hosts-file\") pod \"node-resolver-2jk6j\" (UID: \"e5b85376-eda9-4770-ad55-b7a59a00e3f3\") " pod="openshift-dns/node-resolver-2jk6j" Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.210298 4871 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.210327 4871 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.210391 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:04.210369942 +0000 UTC m=+22.393421608 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:03 crc kubenswrapper[4871]: E1126 05:26:03.210408 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:04.210401183 +0000 UTC m=+22.393452879 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.231944 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85lq9\" (UniqueName: \"kubernetes.io/projected/e5b85376-eda9-4770-ad55-b7a59a00e3f3-kube-api-access-85lq9\") pod \"node-resolver-2jk6j\" (UID: \"e5b85376-eda9-4770-ad55-b7a59a00e3f3\") " pod="openshift-dns/node-resolver-2jk6j" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.297376 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-2jk6j" Nov 26 05:26:03 crc kubenswrapper[4871]: W1126 05:26:03.312213 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5b85376_eda9_4770_ad55_b7a59a00e3f3.slice/crio-d0fd101f7ea51ebd348ef3b3ae853e46da7dcece8a934a5c36baa951b0199185 WatchSource:0}: Error finding container d0fd101f7ea51ebd348ef3b3ae853e46da7dcece8a934a5c36baa951b0199185: Status 404 returned error can't find the container with id d0fd101f7ea51ebd348ef3b3ae853e46da7dcece8a934a5c36baa951b0199185 Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.339756 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-4scr4"] Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.340574 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.344950 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-rpr6z"] Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.345248 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.345380 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qzw7d"] Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.346085 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.346695 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.346722 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.346737 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-zmlz2"] Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.346773 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.346808 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.346872 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.346951 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.351830 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.351861 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.351879 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.352516 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.360082 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.360875 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.360894 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.361351 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.361566 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.369406 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.369497 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.369617 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.369868 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.369897 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.369958 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.384491 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.400796 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.413848 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-os-release\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.413889 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-system-cni-dir\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.413905 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.413938 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-cnibin\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.413961 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-cni-binary-copy\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.413983 4871 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mg9b\" (UniqueName: \"kubernetes.io/projected/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-kube-api-access-5mg9b\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.413997 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.415874 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.426221 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: 
connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.439301 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started
\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"na
me\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.450625 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.466641 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.480133 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.495597 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.514126 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.514764 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-etc-openvswitch\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.514802 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-netd\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.514842 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-cnibin\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.514860 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-run-k8s-cni-cncf-io\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.514949 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-cnibin\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515042 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-cni-binary-copy\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515111 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-var-lib-cni-bin\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515162 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-systemd-units\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515262 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-kubelet\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515298 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-openvswitch\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515323 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-ovn-kubernetes\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515407 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-proxy-tls\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515457 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-cnibin\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515496 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mg9b\" (UniqueName: \"kubernetes.io/projected/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-kube-api-access-5mg9b\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515551 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-slash\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515577 4871 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nzm5\" (UniqueName: \"kubernetes.io/projected/6a0aba42-7edc-4d81-850e-3e3439eeaec8-kube-api-access-9nzm5\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515669 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-etc-kubernetes\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515693 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-script-lib\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515727 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-os-release\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515756 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-system-cni-dir\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515811 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-rootfs\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515857 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-system-cni-dir\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515866 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-run-multus-certs\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515864 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-cni-binary-copy\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 
05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515895 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/84290973-bc95-4326-bacd-7c210346620a-multus-daemon-config\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.515957 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-systemd\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516008 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-multus-cni-dir\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516033 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-env-overrides\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516070 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-os-release\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516085 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-bin\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516112 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-config\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516154 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-hostroot\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516180 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mshq8\" (UniqueName: \"kubernetes.io/projected/84290973-bc95-4326-bacd-7c210346620a-kube-api-access-mshq8\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc 
kubenswrapper[4871]: I1126 05:26:03.516249 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-mcd-auth-proxy-config\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516274 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-var-lib-cni-multus\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516294 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-multus-conf-dir\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516314 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-os-release\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516356 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-log-socket\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516380 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516440 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovn-node-metrics-cert\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.516499 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517208 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-cni-sysctl-allowlist\") pod 
\"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517258 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-run-netns\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517301 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-netns\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517326 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-ovn\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517379 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-multus-socket-dir-parent\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517440 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-var-lib-kubelet\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517465 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-var-lib-openvswitch\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517519 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2nq7\" (UniqueName: \"kubernetes.io/projected/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-kube-api-access-t2nq7\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517583 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517642 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"node-log\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-node-log\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517672 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-system-cni-dir\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.517819 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/84290973-bc95-4326-bacd-7c210346620a-cni-binary-copy\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.518215 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.536056 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.543703 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.546275 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mg9b\" (UniqueName: \"kubernetes.io/projected/06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c-kube-api-access-5mg9b\") pod \"multus-additional-cni-plugins-4scr4\" (UID: \"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\") " pod="openshift-multus/multus-additional-cni-plugins-4scr4" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.556759 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.564555 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.573505 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.582136 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.589427 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.598754 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.607558 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618213 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-proxy-tls\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618251 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-cnibin\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618271 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-kubelet\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618289 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-openvswitch\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618310 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-ovn-kubernetes\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618342 4871 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-slash\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618362 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nzm5\" (UniqueName: \"kubernetes.io/projected/6a0aba42-7edc-4d81-850e-3e3439eeaec8-kube-api-access-9nzm5\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618382 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-script-lib\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618391 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-cnibin\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618404 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-etc-kubernetes\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618452 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-etc-kubernetes\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618466 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-run-multus-certs\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618491 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-run-multus-certs\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618506 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-rootfs\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618518 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-ovn-kubernetes\") pod 
\"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618553 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-multus-cni-dir\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618565 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-kubelet\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618581 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/84290973-bc95-4326-bacd-7c210346620a-multus-daemon-config\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618588 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-openvswitch\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618609 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-systemd\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618650 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-bin\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618663 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-slash\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618675 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-config\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.619443 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-env-overrides\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 
05:26:03.619469 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mshq8\" (UniqueName: \"kubernetes.io/projected/84290973-bc95-4326-bacd-7c210346620a-kube-api-access-mshq8\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618888 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-bin\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618779 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-systemd\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618954 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-multus-cni-dir\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.619218 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-script-lib\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.619336 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/84290973-bc95-4326-bacd-7c210346620a-multus-daemon-config\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.619376 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-config\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.618907 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-rootfs\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.619493 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-hostroot\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.619841 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-multus-conf-dir\") 
pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.619893 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-hostroot\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.619921 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-multus-conf-dir\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.619858 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-mcd-auth-proxy-config\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.619954 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-var-lib-cni-multus\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.619973 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-env-overrides\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620027 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-var-lib-cni-multus\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620163 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-os-release\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620445 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-mcd-auth-proxy-config\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620497 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-os-release\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620553 4871 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-log-socket\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620573 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620622 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-log-socket\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620651 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620674 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-netns\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620694 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-ovn\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620746 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-netns\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620779 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-ovn\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620802 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovn-node-metrics-cert\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.620823 4871 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-run-netns\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621197 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2nq7\" (UniqueName: \"kubernetes.io/projected/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-kube-api-access-t2nq7\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621245 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-run-netns\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621403 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-multus-socket-dir-parent\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621435 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-var-lib-kubelet\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621454 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-var-lib-openvswitch\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621500 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-node-log\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621555 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-system-cni-dir\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621606 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/84290973-bc95-4326-bacd-7c210346620a-cni-binary-copy\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621633 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-run-k8s-cni-cncf-io\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621653 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-etc-openvswitch\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621674 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-netd\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621697 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-var-lib-cni-bin\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621718 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-systemd-units\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621789 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-systemd-units\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621401 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-multus-socket-dir-parent\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621831 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-var-lib-kubelet\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621863 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-var-lib-openvswitch\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.621891 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-node-log\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.622032 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-system-cni-dir\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.622542 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/84290973-bc95-4326-bacd-7c210346620a-cni-binary-copy\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.622592 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-run-k8s-cni-cncf-io\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.622639 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-etc-openvswitch\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.622672 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-netd\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.622702 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/84290973-bc95-4326-bacd-7c210346620a-host-var-lib-cni-bin\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.623412 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovn-node-metrics-cert\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.623482 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-proxy-tls\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.625876 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.633464 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nzm5\" (UniqueName: \"kubernetes.io/projected/6a0aba42-7edc-4d81-850e-3e3439eeaec8-kube-api-access-9nzm5\") pod \"ovnkube-node-qzw7d\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.640865 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mshq8\" (UniqueName: \"kubernetes.io/projected/84290973-bc95-4326-bacd-7c210346620a-kube-api-access-mshq8\") pod \"multus-rpr6z\" (UID: \"84290973-bc95-4326-bacd-7c210346620a\") " pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.645834 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2nq7\" (UniqueName: \"kubernetes.io/projected/3cd6a6d4-9b5f-4d27-a839-d37960bff02c-kube-api-access-t2nq7\") pod \"machine-config-daemon-zmlz2\" (UID: \"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\") " pod="openshift-machine-config-operator/machine-config-daemon-zmlz2"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.656918 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-4scr4"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.665807 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rpr6z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.674172 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.686662 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.690904 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d"}
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.691828 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.691949 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2"
Nov 26 05:26:03 crc kubenswrapper[4871]: W1126 05:26:03.692770 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod84290973_bc95_4326_bacd_7c210346620a.slice/crio-7ee7f76ff1656cb8f189c556d09a9c6e91452f752a9722233df5f40bf522f2fb WatchSource:0}: Error finding container 7ee7f76ff1656cb8f189c556d09a9c6e91452f752a9722233df5f40bf522f2fb: Status 404 returned error can't find the container with id 7ee7f76ff1656cb8f189c556d09a9c6e91452f752a9722233df5f40bf522f2fb
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.696228 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4"}
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.696278 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30"}
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.696288 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"22acc6c33fa85f7c6c3b97adb9ff63be65a5b85e1052f3736b156a35e05753fc"}
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.697863 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-2jk6j" event={"ID":"e5b85376-eda9-4770-ad55-b7a59a00e3f3","Type":"ContainerStarted","Data":"3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a"}
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.697950 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-2jk6j" event={"ID":"e5b85376-eda9-4770-ad55-b7a59a00e3f3","Type":"ContainerStarted","Data":"d0fd101f7ea51ebd348ef3b3ae853e46da7dcece8a934a5c36baa951b0199185"}
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.700911 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"ea0040ce28a5492f1a784d82f878259a1e34d60fbc998412d1f65dbf7beb7b71"}
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.703821 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a"}
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.703865 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"61694ac6699f8d878f3b809c4a1d32cad84e211f9214e3f86cff78a62d9a7de7"}
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.705378 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.719224 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.745879 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.775802 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.790637 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.801641 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.817425 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:03Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.830609 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:03Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.840251 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:03Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.862712 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.874606 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.886204 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.901669 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.919297 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:03 crc kubenswrapper[4871]: I1126 05:26:03.950117 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:03 crc kubenswrapper[4871]: 
I1126 05:26:03.982725 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.023157 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.027455 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.027491 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.027612 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.027627 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.027638 4871 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.027672 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-11-26 05:26:06.027660285 +0000 UTC m=+24.210711871 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.027612 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.027710 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.027719 4871 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.027745 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:06.027736637 +0000 UTC m=+24.210788223 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.061842 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.100770 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.127955 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.128116 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:26:06.128080378 +0000 UTC m=+24.311132014 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.146758 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.183903 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.222483 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.228995 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 
05:26:04.229066 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.229271 4871 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.229355 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:06.22933233 +0000 UTC m=+24.412383956 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.229836 4871 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.229936 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:06.229913244 +0000 UTC m=+24.412964920 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.263564 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes
.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.304587 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.507012 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.507071 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.507010 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.507132 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.507231 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:04 crc kubenswrapper[4871]: E1126 05:26:04.507335 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.511697 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.512366 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.513021 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.513686 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.514276 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.514818 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.515411 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.516026 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.518064 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.518584 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" 
path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.519110 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.520132 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.520647 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.521506 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.522022 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.522937 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.523483 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.523894 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.526430 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.527228 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.527752 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.528302 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.528779 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.529404 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" 
path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.529869 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.530459 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.531096 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.531585 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.532182 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.532725 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.533165 4871 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.533262 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.537099 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.537663 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.538077 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.539621 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.540630 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.541132 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" 
path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.542082 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.542750 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.543577 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.544167 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.545200 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.545784 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.546666 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.547167 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.548018 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.548763 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.549579 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.550042 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.550898 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.551420 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" 
path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.552061 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.552963 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.707832 4871 generic.go:334] "Generic (PLEG): container finished" podID="06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c" containerID="dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178" exitCode=0 Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.707942 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" event={"ID":"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c","Type":"ContainerDied","Data":"dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178"} Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.708091 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" event={"ID":"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c","Type":"ContainerStarted","Data":"2c21904c1b09f52cc050f575407f0e5ae47940239402debe070d89934f835616"} Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.709268 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57" exitCode=0 Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.709329 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57"} Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.709357 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"20b0d04aa609bd9448f9eebd0c8d8d4d04c75efa3c26ab1247d21da581e89bc6"} Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.711014 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rpr6z" event={"ID":"84290973-bc95-4326-bacd-7c210346620a","Type":"ContainerStarted","Data":"dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce"} Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.711073 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rpr6z" event={"ID":"84290973-bc95-4326-bacd-7c210346620a","Type":"ContainerStarted","Data":"7ee7f76ff1656cb8f189c556d09a9c6e91452f752a9722233df5f40bf522f2fb"} Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.713228 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c"} Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.713263 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" 
event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417"} Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.713280 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"96040c53f238282264d6c0660bb8674ce3d1a2441ff26a08d0e1e53bd4fe2413"} Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.742615 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.768953 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.786848 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.798750 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.813677 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.829799 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.851519 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: 
I1126 05:26:04.865549 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.879831 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.895392 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.908322 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.920258 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.931965 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.943090 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.954264 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.963780 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:04 crc kubenswrapper[4871]: I1126 05:26:04.985564 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPat
h\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"m
ountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.022611 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.060389 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.106256 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.149303 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.185281 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.227950 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.265328 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z 
is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.719186 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a"} Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.722917 4871 generic.go:334] "Generic (PLEG): container finished" podID="06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c" containerID="0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe" exitCode=0 Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.722971 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" event={"ID":"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c","Type":"ContainerDied","Data":"0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe"} Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.730517 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea"} Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.730590 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c"} Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.730602 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452"} Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.730629 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84"} Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.730641 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d"} Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.730652 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa"} Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.730782 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.745086 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.757236 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.766594 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.781409 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-ap
iserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.793038 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.817119 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z 
is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.830436 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.839899 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.852746 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.862000 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.874018 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c85
7df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.886892 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.899118 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.921577 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z 
is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.930443 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-vhnk4"] Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.930762 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-vhnk4" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.934133 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.935444 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.935700 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:05Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.952924 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 05:26:05 crc kubenswrapper[4871]: I1126 05:26:05.972592 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.023711 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.052503 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4967e853-6782-4ec9-bd03-6a98f803c1a6-host\") pod \"node-ca-vhnk4\" (UID: \"4967e853-6782-4ec9-bd03-6a98f803c1a6\") " pod="openshift-image-registry/node-ca-vhnk4" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.052648 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.052680 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4967e853-6782-4ec9-bd03-6a98f803c1a6-serviceca\") pod \"node-ca-vhnk4\" (UID: \"4967e853-6782-4ec9-bd03-6a98f803c1a6\") " pod="openshift-image-registry/node-ca-vhnk4" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.052703 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58wsv\" (UniqueName: \"kubernetes.io/projected/4967e853-6782-4ec9-bd03-6a98f803c1a6-kube-api-access-58wsv\") pod \"node-ca-vhnk4\" (UID: \"4967e853-6782-4ec9-bd03-6a98f803c1a6\") " pod="openshift-image-registry/node-ca-vhnk4" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.052733 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.052816 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.052852 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not 
registered Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.052861 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.052868 4871 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.052889 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.052907 4871 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.052958 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:10.052932369 +0000 UTC m=+28.235984015 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.052986 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:10.05297554 +0000 UTC m=+28.236027256 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.065032 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.099157 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.144299 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.153145 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.153395 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:26:10.153331171 +0000 UTC m=+28.336382797 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.154090 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4967e853-6782-4ec9-bd03-6a98f803c1a6-host\") pod \"node-ca-vhnk4\" (UID: \"4967e853-6782-4ec9-bd03-6a98f803c1a6\") " pod="openshift-image-registry/node-ca-vhnk4" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.154221 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4967e853-6782-4ec9-bd03-6a98f803c1a6-host\") pod \"node-ca-vhnk4\" (UID: \"4967e853-6782-4ec9-bd03-6a98f803c1a6\") " pod="openshift-image-registry/node-ca-vhnk4" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.154244 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4967e853-6782-4ec9-bd03-6a98f803c1a6-serviceca\") pod \"node-ca-vhnk4\" (UID: \"4967e853-6782-4ec9-bd03-6a98f803c1a6\") " pod="openshift-image-registry/node-ca-vhnk4" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.154351 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58wsv\" (UniqueName: \"kubernetes.io/projected/4967e853-6782-4ec9-bd03-6a98f803c1a6-kube-api-access-58wsv\") pod \"node-ca-vhnk4\" (UID: 
\"4967e853-6782-4ec9-bd03-6a98f803c1a6\") " pod="openshift-image-registry/node-ca-vhnk4" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.155188 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4967e853-6782-4ec9-bd03-6a98f803c1a6-serviceca\") pod \"node-ca-vhnk4\" (UID: \"4967e853-6782-4ec9-bd03-6a98f803c1a6\") " pod="openshift-image-registry/node-ca-vhnk4" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.198584 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58wsv\" (UniqueName: \"kubernetes.io/projected/4967e853-6782-4ec9-bd03-6a98f803c1a6-kube-api-access-58wsv\") pod \"node-ca-vhnk4\" (UID: \"4967e853-6782-4ec9-bd03-6a98f803c1a6\") " pod="openshift-image-registry/node-ca-vhnk4" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.207726 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.243727 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-vhnk4" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.252519 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.255056 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: 
\"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.255145 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.255281 4871 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.255329 4871 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.255407 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:10.255378733 +0000 UTC m=+28.438430359 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.255447 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:10.255433374 +0000 UTC m=+28.438485000 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.309347 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPa
th\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.326048 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMoun
ts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.363161 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.404319 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.444065 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.480652 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.507185 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.507244 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.507293 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.507388 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.507455 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:06 crc kubenswrapper[4871]: E1126 05:26:06.507512 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.519002 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.562401 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.601471 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.649201 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z 
is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.680151 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.721713 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\
":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,
\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.734970 4871 generic.go:334] "Generic (PLEG): container finished" podID="06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c" containerID="f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0" exitCode=0 Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.735169 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" event={"ID":"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c","Type":"ContainerDied","Data":"f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0"} Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.736366 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vhnk4" event={"ID":"4967e853-6782-4ec9-bd03-6a98f803c1a6","Type":"ContainerStarted","Data":"e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b"} Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.736405 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vhnk4" event={"ID":"4967e853-6782-4ec9-bd03-6a98f803c1a6","Type":"ContainerStarted","Data":"a1ff7cab938572101ff3e9a36c7da9e687c41bc91cdedd99db38a533819e0598"} Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.766276 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.800457 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.842225 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.885657 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-di
r\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.897059 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.899844 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.922004 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.937973 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 26 05:26:06 crc kubenswrapper[4871]: I1126 05:26:06.991018 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:06Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.017849 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.065702 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.112939 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.142574 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.181858 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.227426 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.262212 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.301558 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.346360 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.383714 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.428012 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.468294 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.505507 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.547837 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.582855 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.626073 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.662845 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.706519 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.743397 4871 generic.go:334] "Generic (PLEG): container finished" podID="06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c" containerID="6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91" exitCode=0 Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.743489 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" event={"ID":"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c","Type":"ContainerDied","Data":"6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91"} Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.753997 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.802512 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\
" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.827298 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.863472 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.903282 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.947276 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:07 crc kubenswrapper[4871]: I1126 05:26:07.986153 4871 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358
e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:07Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.027198 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.065758 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.104223 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.141792 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.185629 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.222289 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.275947 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z 
is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.300017 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.341061 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.386799 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.421500 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.462016 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.474968 4871 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.478002 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.478057 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.478076 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.478203 4871 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.506484 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.506506 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.506692 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:08 crc kubenswrapper[4871]: E1126 05:26:08.506622 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:08 crc kubenswrapper[4871]: E1126 05:26:08.506857 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:08 crc kubenswrapper[4871]: E1126 05:26:08.507033 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.507033 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\
\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299e
db9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.533263 4871 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.533561 4871 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.534505 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.534546 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.534586 4871 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.534603 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.534614 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:08Z","lastTransitionTime":"2025-11-26T05:26:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:08 crc kubenswrapper[4871]: E1126 05:26:08.551326 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.556128 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.556181 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.556214 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.556233 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.556246 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:08Z","lastTransitionTime":"2025-11-26T05:26:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:08 crc kubenswrapper[4871]: E1126 05:26:08.570544 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.574813 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.574861 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.574878 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.574902 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.574919 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:08Z","lastTransitionTime":"2025-11-26T05:26:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:08 crc kubenswrapper[4871]: E1126 05:26:08.588848 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.592699 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.592748 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.592763 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.592783 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.592795 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:08Z","lastTransitionTime":"2025-11-26T05:26:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:08 crc kubenswrapper[4871]: E1126 05:26:08.611643 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.614886 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.614927 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.614941 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.614961 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.614972 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:08Z","lastTransitionTime":"2025-11-26T05:26:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:08 crc kubenswrapper[4871]: E1126 05:26:08.636436 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: E1126 05:26:08.636767 4871 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.638619 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.638694 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.638719 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.638754 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.638779 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:08Z","lastTransitionTime":"2025-11-26T05:26:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.742137 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.742193 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.742206 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.742224 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.742235 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:08Z","lastTransitionTime":"2025-11-26T05:26:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.760581 4871 generic.go:334] "Generic (PLEG): container finished" podID="06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c" containerID="ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3" exitCode=0 Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.760673 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" event={"ID":"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c","Type":"ContainerDied","Data":"ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3"} Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.767453 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647"} Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.779989 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.800476 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.819159 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.834277 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.848897 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.848935 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.848945 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.848963 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.849004 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:08Z","lastTransitionTime":"2025-11-26T05:26:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.856553 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\"
:0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.871812 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.884597 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.898643 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.911624 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.944856 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.951389 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.951475 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.951504 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.951591 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.951606 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:08Z","lastTransitionTime":"2025-11-26T05:26:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:08 crc kubenswrapper[4871]: I1126 05:26:08.984382 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:08Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.023109 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:09Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.053904 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.053952 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.053962 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.053975 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.053984 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:09Z","lastTransitionTime":"2025-11-26T05:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.077648 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe
897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:09Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.101957 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:09Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.156589 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.156645 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.156659 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.156677 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.156689 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:09Z","lastTransitionTime":"2025-11-26T05:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.260702 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.260744 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.260754 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.260772 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.260781 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:09Z","lastTransitionTime":"2025-11-26T05:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.364424 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.364462 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.364475 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.364491 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.364504 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:09Z","lastTransitionTime":"2025-11-26T05:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.467511 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.467574 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.467587 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.467638 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.467650 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:09Z","lastTransitionTime":"2025-11-26T05:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.570987 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.571045 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.571062 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.571086 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.571104 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:09Z","lastTransitionTime":"2025-11-26T05:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.673951 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.674018 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.674055 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.674087 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.674111 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:09Z","lastTransitionTime":"2025-11-26T05:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.777878 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.777940 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.777958 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.777981 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.777999 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:09Z","lastTransitionTime":"2025-11-26T05:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.784300 4871 generic.go:334] "Generic (PLEG): container finished" podID="06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c" containerID="22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8" exitCode=0 Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.784392 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" event={"ID":"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c","Type":"ContainerDied","Data":"22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8"} Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.803468 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:09Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.823291 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:09Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.857705 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:09Z 
is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.879108 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:09Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.885802 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.885868 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.885885 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.885958 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.885976 4871 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:09Z","lastTransitionTime":"2025-11-26T05:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.896724 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:09Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.911652 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:09Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.925560 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:09Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.955924 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:09Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.989767 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.989807 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.989822 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.989840 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:09 crc kubenswrapper[4871]: I1126 05:26:09.989854 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:09Z","lastTransitionTime":"2025-11-26T05:26:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.010640 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Dis
abled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 
05:26:10.041971 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"
containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.059737 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.080025 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.093866 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.093911 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.093923 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.093944 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.093956 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:10Z","lastTransitionTime":"2025-11-26T05:26:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.094783 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.098950 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.099177 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.099141 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.099384 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.099457 4871 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.099317 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.099752 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.099785 4871 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.099853 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:18.099831831 +0000 UTC m=+36.282883417 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.100647 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:18.10063149 +0000 UTC m=+36.283683076 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.106182 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca
8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.196571 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.196598 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.196607 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.196622 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.196631 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:10Z","lastTransitionTime":"2025-11-26T05:26:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.200076 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.200202 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:26:18.200182282 +0000 UTC m=+36.383233878 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.299440 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.299471 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.299479 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.299493 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.299506 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:10Z","lastTransitionTime":"2025-11-26T05:26:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.301038 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.301086 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.301206 4871 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.301219 4871 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.301273 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:18.3012545 +0000 UTC m=+36.484306096 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.301287 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:18.30128187 +0000 UTC m=+36.484333456 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.402558 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.402611 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.402630 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.402654 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.402672 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:10Z","lastTransitionTime":"2025-11-26T05:26:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.505455 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.505507 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.505543 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.505562 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.505575 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:10Z","lastTransitionTime":"2025-11-26T05:26:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.507087 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.507190 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.507091 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.507324 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.507499 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:26:10 crc kubenswrapper[4871]: E1126 05:26:10.507631 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.608723 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.608813 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.608825 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.608847 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.608859 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:10Z","lastTransitionTime":"2025-11-26T05:26:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.711966 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.712035 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.712047 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.712072 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.712087 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:10Z","lastTransitionTime":"2025-11-26T05:26:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.801876 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" event={"ID":"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c","Type":"ContainerStarted","Data":"145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249"}
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.811657 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2"}
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.812625 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.812674 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.814402 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.814446 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.814462 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.814484 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.814501 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:10Z","lastTransitionTime":"2025-11-26T05:26:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.820393 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.841860 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.848188 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.859336 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\
\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.881236 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"}
,{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.898500 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.914192 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.917508 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.917567 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.917583 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.917604 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.917623 4871 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:10Z","lastTransitionTime":"2025-11-26T05:26:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.937197 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.969846 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:10 crc kubenswrapper[4871]: I1126 05:26:10.984680 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.002183 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":
\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\
\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.020265 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.020320 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.020339 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.020361 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.020375 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:11Z","lastTransitionTime":"2025-11-26T05:26:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.022118 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.038575 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.057762 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.076304 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.094295 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.117704 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.122902 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.122960 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.122979 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.123008 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.123026 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:11Z","lastTransitionTime":"2025-11-26T05:26:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.141414 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.163895 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.181889 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.202679 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-ap
iserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.225868 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.226303 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.226376 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.226401 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.226433 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.226451 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:11Z","lastTransitionTime":"2025-11-26T05:26:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.258351 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkub
e-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.274864 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.295284 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.313096 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.329071 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.329681 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.329726 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.329743 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.329763 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.329781 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:11Z","lastTransitionTime":"2025-11-26T05:26:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.346595 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.361217 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.433430 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.433480 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.433492 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.433510 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.433540 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:11Z","lastTransitionTime":"2025-11-26T05:26:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.536988 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.537040 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.537052 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.537072 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.537084 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:11Z","lastTransitionTime":"2025-11-26T05:26:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.642796 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.642883 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.642902 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.642922 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.642947 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:11Z","lastTransitionTime":"2025-11-26T05:26:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.746679 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.746758 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.746781 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.746811 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.746835 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:11Z","lastTransitionTime":"2025-11-26T05:26:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.816919 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.850392 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.850462 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.850480 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.850506 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.850551 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:11Z","lastTransitionTime":"2025-11-26T05:26:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.851381 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.877845 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserve
r-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.900759 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.931567 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://221bd3e870fb26c640d21cb0528555b3586eb52b
f031ddd34cc98df9db7d29d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.951099 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.953488 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.953576 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.953601 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.953633 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.953657 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:11Z","lastTransitionTime":"2025-11-26T05:26:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.977632 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"starte
dAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 
2025-08-24T17:21:41Z" Nov 26 05:26:11 crc kubenswrapper[4871]: I1126 05:26:11.998293 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:11Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.018353 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.036822 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.054786 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.056179 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.056219 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.056230 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.056246 
4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.056259 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:12Z","lastTransitionTime":"2025-11-26T05:26:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.074514 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b
82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.090418 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.110888 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.132406 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\"
,\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.147611 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.158688 4871 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.158748 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.158766 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.158790 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.158807 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:12Z","lastTransitionTime":"2025-11-26T05:26:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.262294 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.262362 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.262380 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.262407 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.262427 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:12Z","lastTransitionTime":"2025-11-26T05:26:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.365772 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.365847 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.365867 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.365890 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.365909 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:12Z","lastTransitionTime":"2025-11-26T05:26:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.469566 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.469628 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.469646 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.469672 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.469695 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:12Z","lastTransitionTime":"2025-11-26T05:26:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.507098 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.507133 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.507145 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:12 crc kubenswrapper[4871]: E1126 05:26:12.507262 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:12 crc kubenswrapper[4871]: E1126 05:26:12.507470 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:12 crc kubenswrapper[4871]: E1126 05:26:12.507651 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.527945 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.553199 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.571861 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.571920 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.571937 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.571958 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.571973 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:12Z","lastTransitionTime":"2025-11-26T05:26:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.593414 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.616102 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.631096 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.647487 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-ap
iserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.661707 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.674624 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.674669 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.674680 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.674695 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.674703 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:12Z","lastTransitionTime":"2025-11-26T05:26:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.679358 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.690437 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.710655 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.731796 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.745187 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.762767 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.772179 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.777023 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.777090 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.777108 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.777134 
4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.777152 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:12Z","lastTransitionTime":"2025-11-26T05:26:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.879879 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.879943 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.879971 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.880002 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.880027 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:12Z","lastTransitionTime":"2025-11-26T05:26:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.982937 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.983025 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.983048 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.983078 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:12 crc kubenswrapper[4871]: I1126 05:26:12.983101 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:12Z","lastTransitionTime":"2025-11-26T05:26:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.085792 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.085828 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.085846 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.085870 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.085886 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:13Z","lastTransitionTime":"2025-11-26T05:26:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.187796 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.187846 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.187858 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.187874 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.187888 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:13Z","lastTransitionTime":"2025-11-26T05:26:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.289796 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.289860 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.289883 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.289908 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.289938 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:13Z","lastTransitionTime":"2025-11-26T05:26:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.302222 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.322083 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\
":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.341869 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.356272 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.372986 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.386495 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.391703 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.391726 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.391734 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.391747 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.391758 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:13Z","lastTransitionTime":"2025-11-26T05:26:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.405774 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.425488 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.460606 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://221bd3e870fb26c640d21cb0528555b3586eb52b
f031ddd34cc98df9db7d29d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.474757 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.493934 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.493963 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.493972 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.493986 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.493995 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:13Z","lastTransitionTime":"2025-11-26T05:26:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.497939 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"starte
dAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 
2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.509911 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.528946 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.545856 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.555020 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.597104 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.597187 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.597196 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.597208 
4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.597218 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:13Z","lastTransitionTime":"2025-11-26T05:26:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.700317 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.700705 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.700802 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.700840 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.700884 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:13Z","lastTransitionTime":"2025-11-26T05:26:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.803743 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.803875 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.803896 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.803967 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.803986 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:13Z","lastTransitionTime":"2025-11-26T05:26:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.826052 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/0.log" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.829876 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2" exitCode=1 Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.829936 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2"} Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.830954 4871 scope.go:117] "RemoveContainer" containerID="221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.858845 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":t
rue,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] 
Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.882230 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.907191 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.907262 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.907284 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.907316 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.907338 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:13Z","lastTransitionTime":"2025-11-26T05:26:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.914224 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"message\\\":\\\"6 05:26:13.211003 6167 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1126 05:26:13.211020 6167 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 05:26:13.211025 6167 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 05:26:13.211050 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 05:26:13.211077 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 05:26:13.211123 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 05:26:13.211131 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 05:26:13.211165 6167 factory.go:656] Stopping watch factory\\\\nI1126 05:26:13.211181 6167 ovnkube.go:599] Stopped ovnkube\\\\nI1126 05:26:13.211207 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 05:26:13.211219 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 05:26:13.211228 6167 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 05:26:13.211236 6167 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 05:26:13.211248 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 05:26:13.211256 6167 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 05:26:13.211264 6167 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.934318 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192
.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.951662 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.966095 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.982003 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:13 crc kubenswrapper[4871]: I1126 05:26:13.998705 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:13Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.009737 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.009784 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.009798 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.009819 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.009834 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:14Z","lastTransitionTime":"2025-11-26T05:26:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.023078 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0
009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.045252 4871 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.059704 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.076224 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.093297 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.104830 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.112576 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.112621 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.112635 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.112652 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.112664 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:14Z","lastTransitionTime":"2025-11-26T05:26:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.216019 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.216126 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.216151 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.216649 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.216936 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:14Z","lastTransitionTime":"2025-11-26T05:26:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.320373 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.320445 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.320474 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.320505 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.320522 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:14Z","lastTransitionTime":"2025-11-26T05:26:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.423641 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.423700 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.423718 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.423743 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.423766 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:14Z","lastTransitionTime":"2025-11-26T05:26:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.507487 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.507574 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.507523 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:14 crc kubenswrapper[4871]: E1126 05:26:14.507790 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:14 crc kubenswrapper[4871]: E1126 05:26:14.507916 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:14 crc kubenswrapper[4871]: E1126 05:26:14.508098 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.527494 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.527587 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.527612 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.527644 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.527667 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:14Z","lastTransitionTime":"2025-11-26T05:26:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.630023 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.630072 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.630083 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.630099 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.630112 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:14Z","lastTransitionTime":"2025-11-26T05:26:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.732924 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.732975 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.732991 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.733011 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.733027 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:14Z","lastTransitionTime":"2025-11-26T05:26:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.834426 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.834474 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.834498 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.834521 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.834562 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:14Z","lastTransitionTime":"2025-11-26T05:26:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.836437 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/0.log" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.839395 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3"} Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.840458 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.864208 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225
b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.880742 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.895982 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.911739 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\"
,\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.927445 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.936856 4871 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.936898 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.936910 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.936927 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.936941 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:14Z","lastTransitionTime":"2025-11-26T05:26:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.954002 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c
276be1723f13cf348923f6e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"message\\\":\\\"6 05:26:13.211003 6167 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1126 05:26:13.211020 6167 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 05:26:13.211025 6167 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 05:26:13.211050 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 05:26:13.211077 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 05:26:13.211123 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 05:26:13.211131 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 05:26:13.211165 6167 factory.go:656] Stopping watch factory\\\\nI1126 05:26:13.211181 6167 ovnkube.go:599] Stopped ovnkube\\\\nI1126 05:26:13.211207 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 05:26:13.211219 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 05:26:13.211228 6167 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 05:26:13.211236 6167 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 05:26:13.211248 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 05:26:13.211256 6167 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 05:26:13.211264 6167 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.967885 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:14 crc kubenswrapper[4871]: I1126 05:26:14.988376 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:14Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.004503 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.014698 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.032315 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.039611 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.039650 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.039661 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.039678 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.039690 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:15Z","lastTransitionTime":"2025-11-26T05:26:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.051284 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5
ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.068556 4871 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.088253 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.142744 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.142813 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.142852 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.142888 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.142911 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:15Z","lastTransitionTime":"2025-11-26T05:26:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.245209 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.245288 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.245311 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.245335 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.245357 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:15Z","lastTransitionTime":"2025-11-26T05:26:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.347950 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.348018 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.348036 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.348060 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.348076 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:15Z","lastTransitionTime":"2025-11-26T05:26:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.450627 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.450679 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.450692 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.450716 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.450734 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:15Z","lastTransitionTime":"2025-11-26T05:26:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.553606 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.553662 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.553680 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.553704 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.553722 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:15Z","lastTransitionTime":"2025-11-26T05:26:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.657008 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.657064 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.657076 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.657094 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.657122 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:15Z","lastTransitionTime":"2025-11-26T05:26:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.711415 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz"] Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.712044 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.713634 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.714365 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.729678 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.759935 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.759976 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.759989 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.760008 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 
05:26:15.760021 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:15Z","lastTransitionTime":"2025-11-26T05:26:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.762887 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ov
nkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"message\\\":\\\"6 05:26:13.211003 6167 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1126 05:26:13.211020 6167 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 05:26:13.211025 6167 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 05:26:13.211050 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 05:26:13.211077 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 05:26:13.211123 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 05:26:13.211131 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 05:26:13.211165 6167 factory.go:656] Stopping watch factory\\\\nI1126 05:26:13.211181 6167 ovnkube.go:599] Stopped ovnkube\\\\nI1126 05:26:13.211207 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 05:26:13.211219 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 05:26:13.211228 6167 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 05:26:13.211236 6167 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 05:26:13.211248 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 05:26:13.211256 6167 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 
05:26:13.211264 6167 handler.go:208] Removed *v1.Node event handler 2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{
\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.766176 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: \"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.766238 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6brdl\" (UniqueName: \"kubernetes.io/projected/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-kube-api-access-6brdl\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: \"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.766339 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: \"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.766490 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: \"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 
05:26:15.780413 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.797889 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.815934 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.835126 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.846843 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/1.log" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.847870 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/0.log" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.850966 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.853267 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3" exitCode=1 Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.853340 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3"} Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.853443 4871 scope.go:117] "RemoveContainer" containerID="221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.854427 4871 scope.go:117] "RemoveContainer" containerID="ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3" Nov 26 05:26:15 crc kubenswrapper[4871]: E1126 05:26:15.854871 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.862443 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.862546 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.862570 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.862599 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.862622 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:15Z","lastTransitionTime":"2025-11-26T05:26:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.869077 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: \"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.869162 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6brdl\" (UniqueName: \"kubernetes.io/projected/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-kube-api-access-6brdl\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: \"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.869258 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: \"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.869332 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: \"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.870330 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: 
\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.870633 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: \"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.875982 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"moun
tPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{}
,\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.878904 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: \"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.892809 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.900838 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6brdl\" (UniqueName: \"kubernetes.io/projected/5ebf7372-f87d-40b5-ab3b-52fc9622ff3a-kube-api-access-6brdl\") pod \"ovnkube-control-plane-749d76644c-7g5pz\" (UID: \"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.909617 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.929206 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.947038 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.962648 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.966165 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.966224 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.966241 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.966266 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.966283 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:15Z","lastTransitionTime":"2025-11-26T05:26:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.978907 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:15 crc kubenswrapper[4871]: I1126 05:26:15.997316 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:15Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.016692 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha
256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.031115 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.031083 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.053015 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: W1126 05:26:16.053684 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ebf7372_f87d_40b5_ab3b_52fc9622ff3a.slice/crio-8df2b30db7066bdf56d7390d3ccb00aeccde44b45918bc3a56396a4df4e5a8db WatchSource:0}: Error finding container 8df2b30db7066bdf56d7390d3ccb00aeccde44b45918bc3a56396a4df4e5a8db: Status 404 returned error can't find the container with id 
8df2b30db7066bdf56d7390d3ccb00aeccde44b45918bc3a56396a4df4e5a8db Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.069017 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.069058 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.069071 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.069093 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.069104 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:16Z","lastTransitionTime":"2025-11-26T05:26:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.074443 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",
\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.089160 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.106001 4871 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56
025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.125579 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.146326 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c
276be1723f13cf348923f6e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"message\\\":\\\"6 05:26:13.211003 6167 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1126 05:26:13.211020 6167 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 05:26:13.211025 6167 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 05:26:13.211050 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 05:26:13.211077 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 05:26:13.211123 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 05:26:13.211131 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 05:26:13.211165 6167 factory.go:656] Stopping watch factory\\\\nI1126 05:26:13.211181 6167 ovnkube.go:599] Stopped ovnkube\\\\nI1126 05:26:13.211207 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 05:26:13.211219 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 05:26:13.211228 6167 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 05:26:13.211236 6167 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 05:26:13.211248 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 05:26:13.211256 6167 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 05:26:13.211264 6167 handler.go:208] Removed *v1.Node event handler 2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"ePort{ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{0 10257 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{kube-controller-manager: true,},ClusterIP:10.217.4.36,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.36],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 05:26:14.951497 6305 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1126 05:26:14.951501 6305 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951510 6305 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951562 6305 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node 
crc\\\\nF1126 05:26:14.951571 6305 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\
"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.159057 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.171446 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.171492 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.171503 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.171538 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.171550 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:16Z","lastTransitionTime":"2025-11-26T05:26:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.175079 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.190062 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.203697 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.216272 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.230402 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.249201 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.273949 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.273989 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:16 crc 
kubenswrapper[4871]: I1126 05:26:16.274001 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.274018 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.274030 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:16Z","lastTransitionTime":"2025-11-26T05:26:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.376739 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.376844 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.376867 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.376897 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.376936 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:16Z","lastTransitionTime":"2025-11-26T05:26:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.480066 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.480132 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.480151 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.480178 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.480195 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:16Z","lastTransitionTime":"2025-11-26T05:26:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.507810 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:16 crc kubenswrapper[4871]: E1126 05:26:16.508017 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.508375 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:16 crc kubenswrapper[4871]: E1126 05:26:16.508615 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.508704 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:16 crc kubenswrapper[4871]: E1126 05:26:16.509819 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.583637 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.583699 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.583715 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.583741 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.583760 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:16Z","lastTransitionTime":"2025-11-26T05:26:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.687094 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.687206 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.687226 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.687258 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.687279 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:16Z","lastTransitionTime":"2025-11-26T05:26:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.790215 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.790260 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.790271 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.790287 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.790299 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:16Z","lastTransitionTime":"2025-11-26T05:26:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.847787 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-z2d5h"] Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.848233 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:16 crc kubenswrapper[4871]: E1126 05:26:16.848294 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.861823 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" event={"ID":"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a","Type":"ContainerStarted","Data":"735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.861878 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" event={"ID":"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a","Type":"ContainerStarted","Data":"bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.861891 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" event={"ID":"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a","Type":"ContainerStarted","Data":"8df2b30db7066bdf56d7390d3ccb00aeccde44b45918bc3a56396a4df4e5a8db"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.863842 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/1.log" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.870324 4871 scope.go:117] "RemoveContainer" containerID="ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3" Nov 26 05:26:16 crc kubenswrapper[4871]: E1126 05:26:16.870479 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.875494 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.881744 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.881865 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7h6n4\" (UniqueName: \"kubernetes.io/projected/30b3c82b-ca2a-4821-86e0-94aa2afce847-kube-api-access-7h6n4\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.907895 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.907932 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.907942 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.907955 4871 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.907966 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:16Z","lastTransitionTime":"2025-11-26T05:26:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.923291 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c
276be1723f13cf348923f6e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://221bd3e870fb26c640d21cb0528555b3586eb52bf031ddd34cc98df9db7d29d2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"message\\\":\\\"6 05:26:13.211003 6167 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1126 05:26:13.211020 6167 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 05:26:13.211025 6167 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 05:26:13.211050 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 05:26:13.211077 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 05:26:13.211123 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 05:26:13.211131 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 05:26:13.211165 6167 factory.go:656] Stopping watch factory\\\\nI1126 05:26:13.211181 6167 ovnkube.go:599] Stopped ovnkube\\\\nI1126 05:26:13.211207 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1126 05:26:13.211219 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1126 05:26:13.211228 6167 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 05:26:13.211236 6167 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 05:26:13.211248 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 05:26:13.211256 6167 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 05:26:13.211264 6167 handler.go:208] Removed *v1.Node event handler 2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"ePort{ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{0 10257 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{kube-controller-manager: true,},ClusterIP:10.217.4.36,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.36],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 05:26:14.951497 6305 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1126 05:26:14.951501 6305 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951510 6305 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951562 6305 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node 
crc\\\\nF1126 05:26:14.951571 6305 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\
"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.934053 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.950778 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.963322 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.975809 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.982437 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.982573 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7h6n4\" (UniqueName: \"kubernetes.io/projected/30b3c82b-ca2a-4821-86e0-94aa2afce847-kube-api-access-7h6n4\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:16 crc kubenswrapper[4871]: E1126 05:26:16.984109 4871 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 05:26:16 crc kubenswrapper[4871]: E1126 05:26:16.984241 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs podName:30b3c82b-ca2a-4821-86e0-94aa2afce847 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:17.484205997 +0000 UTC m=+35.667257623 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs") pod "network-metrics-daemon-z2d5h" (UID: "30b3c82b-ca2a-4821-86e0-94aa2afce847") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 05:26:16 crc kubenswrapper[4871]: I1126 05:26:16.990631 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:16Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.011683 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.011871 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.011891 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.012062 4871 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.012088 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:17Z","lastTransitionTime":"2025-11-26T05:26:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.012823 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed8145
1ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTim
e\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.019094 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7h6n4\" (UniqueName: \"kubernetes.io/projected/30b3c82b-ca2a-4821-86e0-94aa2afce847-kube-api-access-7h6n4\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.027253 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.043920 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.062910 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.077428 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.095519 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.112285 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.116162 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.116221 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.116240 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.116282 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.116301 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:17Z","lastTransitionTime":"2025-11-26T05:26:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.126782 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.147014 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.165802 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@s
ha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.186214 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.209677 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c
276be1723f13cf348923f6e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"ePort{ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{0 10257 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{kube-controller-manager: true,},ClusterIP:10.217.4.36,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.36],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 05:26:14.951497 6305 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1126 05:26:14.951501 6305 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951510 6305 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951562 6305 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nF1126 05:26:14.951571 6305 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.219701 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.219768 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.219785 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.219809 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.219826 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:17Z","lastTransitionTime":"2025-11-26T05:26:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.227041 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.240886 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.251653 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.262759 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.273051 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.285152 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.297314 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 
05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.310245 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.322000 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.322038 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.322047 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.322062 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.322072 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:17Z","lastTransitionTime":"2025-11-26T05:26:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.322659 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.335005 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.347193 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.359485 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.374166 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:17Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.424985 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.425043 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.425059 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.425084 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.425101 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:17Z","lastTransitionTime":"2025-11-26T05:26:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.488201 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:17 crc kubenswrapper[4871]: E1126 05:26:17.488334 4871 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 05:26:17 crc kubenswrapper[4871]: E1126 05:26:17.488399 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs podName:30b3c82b-ca2a-4821-86e0-94aa2afce847 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:18.4883828 +0000 UTC m=+36.671434396 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs") pod "network-metrics-daemon-z2d5h" (UID: "30b3c82b-ca2a-4821-86e0-94aa2afce847") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.527562 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.527604 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.527616 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.527631 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.527682 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:17Z","lastTransitionTime":"2025-11-26T05:26:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.630235 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.630269 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.630279 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.630294 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.630306 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:17Z","lastTransitionTime":"2025-11-26T05:26:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.732730 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.732791 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.732812 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.732838 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.732855 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:17Z","lastTransitionTime":"2025-11-26T05:26:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.835410 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.835450 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.835459 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.835472 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:17 crc kubenswrapper[4871]: I1126 05:26:17.835482 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:17Z","lastTransitionTime":"2025-11-26T05:26:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.113183 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.113268 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.113366 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.113400 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.113421 4871 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.113504 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.113561 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.113582 4871 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.113774 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:34.113471323 +0000 UTC m=+52.296522939 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.113813 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:34.113797451 +0000 UTC m=+52.296849077 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.116634 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.116677 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.116695 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.116719 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.116737 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.214225 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.214640 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:26:34.214616233 +0000 UTC m=+52.397667849 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.219520 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.219590 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.219612 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.219637 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.219658 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.315486 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.315585 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.315695 4871 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.315740 4871 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.315808 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:34.315784053 +0000 UTC m=+52.498835719 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.315835 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:34.315822824 +0000 UTC m=+52.498874530 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.321921 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.321949 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.321961 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.321977 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.321988 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.424661 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.424721 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.424738 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.424764 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.424782 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.507834 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.507903 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.507939 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.507906 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.508053 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.508248 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.508417 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.508562 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.518324 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.518521 4871 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.518649 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs podName:30b3c82b-ca2a-4821-86e0-94aa2afce847 nodeName:}" failed. 
No retries permitted until 2025-11-26 05:26:20.518623576 +0000 UTC m=+38.701675192 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs") pod "network-metrics-daemon-z2d5h" (UID: "30b3c82b-ca2a-4821-86e0-94aa2afce847") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.527115 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.527172 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.527198 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.527227 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.527248 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.630298 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.630359 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.630376 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.630400 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.630418 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.688662 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.688746 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.688769 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.688799 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.688826 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.711639 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:18Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.716507 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.716564 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.716590 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.716607 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.716619 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.744918 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:18Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.750267 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.750352 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.750372 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.750400 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.750424 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.771109 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:18Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.777753 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.777793 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.777810 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.777834 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.777851 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.798515 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:18Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.804031 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.804081 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.804097 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.804117 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.804136 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.823208 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:18Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:18 crc kubenswrapper[4871]: E1126 05:26:18.823906 4871 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.825913 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.826127 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.826304 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.826482 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.826699 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.929965 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.930203 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.930377 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.930619 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:18 crc kubenswrapper[4871]: I1126 05:26:18.930817 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:18Z","lastTransitionTime":"2025-11-26T05:26:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.033874 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.033925 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.033942 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.034016 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.034035 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:19Z","lastTransitionTime":"2025-11-26T05:26:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.136848 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.136935 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.136952 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.136974 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.136991 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:19Z","lastTransitionTime":"2025-11-26T05:26:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.239910 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.239970 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.239995 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.240022 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.240043 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:19Z","lastTransitionTime":"2025-11-26T05:26:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.342730 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.342789 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.342814 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.342842 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.342862 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:19Z","lastTransitionTime":"2025-11-26T05:26:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.446504 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.446609 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.446632 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.446664 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.446686 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:19Z","lastTransitionTime":"2025-11-26T05:26:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.549514 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.549626 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.549651 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.549681 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.549704 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:19Z","lastTransitionTime":"2025-11-26T05:26:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.653094 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.653156 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.653180 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.653209 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.653229 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:19Z","lastTransitionTime":"2025-11-26T05:26:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.756121 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.756217 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.756239 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.756262 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.756280 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:19Z","lastTransitionTime":"2025-11-26T05:26:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.859952 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.860041 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.860070 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.860098 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.860117 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:19Z","lastTransitionTime":"2025-11-26T05:26:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.961761 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.961805 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.961820 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.961838 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:19 crc kubenswrapper[4871]: I1126 05:26:19.961851 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:19Z","lastTransitionTime":"2025-11-26T05:26:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.064703 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.064740 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.064749 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.064762 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.064772 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:20Z","lastTransitionTime":"2025-11-26T05:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.167604 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.167642 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.167653 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.167669 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.167680 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:20Z","lastTransitionTime":"2025-11-26T05:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.271128 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.271506 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.271677 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.271840 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.271994 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:20Z","lastTransitionTime":"2025-11-26T05:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.375232 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.375590 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.375694 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.375800 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.375884 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:20Z","lastTransitionTime":"2025-11-26T05:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.479156 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.479248 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.479274 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.479307 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.479332 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:20Z","lastTransitionTime":"2025-11-26T05:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.506960 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.507023 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.507105 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:20 crc kubenswrapper[4871]: E1126 05:26:20.507346 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:20 crc kubenswrapper[4871]: E1126 05:26:20.507578 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:20 crc kubenswrapper[4871]: E1126 05:26:20.507718 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.507926 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:20 crc kubenswrapper[4871]: E1126 05:26:20.508266 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.540114 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:20 crc kubenswrapper[4871]: E1126 05:26:20.540325 4871 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 05:26:20 crc kubenswrapper[4871]: E1126 05:26:20.540437 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs podName:30b3c82b-ca2a-4821-86e0-94aa2afce847 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:24.540405487 +0000 UTC m=+42.723457123 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs") pod "network-metrics-daemon-z2d5h" (UID: "30b3c82b-ca2a-4821-86e0-94aa2afce847") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.582558 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.582621 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.582639 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.582664 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.582683 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:20Z","lastTransitionTime":"2025-11-26T05:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.685975 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.686327 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.686610 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.686838 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.687038 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:20Z","lastTransitionTime":"2025-11-26T05:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.790592 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.790654 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.790670 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.790696 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.790714 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:20Z","lastTransitionTime":"2025-11-26T05:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.893821 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.894206 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.894452 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.894690 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.894836 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:20Z","lastTransitionTime":"2025-11-26T05:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.997585 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.997947 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.998336 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.998680 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:20 crc kubenswrapper[4871]: I1126 05:26:20.998838 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:20Z","lastTransitionTime":"2025-11-26T05:26:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.102081 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.102488 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.102797 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.103020 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.103227 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:21Z","lastTransitionTime":"2025-11-26T05:26:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.206273 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.206710 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.206905 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.207048 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.207185 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:21Z","lastTransitionTime":"2025-11-26T05:26:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.310289 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.310347 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.310364 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.310389 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.310408 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:21Z","lastTransitionTime":"2025-11-26T05:26:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.413176 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.413711 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.413983 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.414154 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.414300 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:21Z","lastTransitionTime":"2025-11-26T05:26:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.517212 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.517511 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.517623 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.517724 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.517825 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:21Z","lastTransitionTime":"2025-11-26T05:26:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.621012 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.621072 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.621090 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.621148 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.621166 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:21Z","lastTransitionTime":"2025-11-26T05:26:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.724918 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.724968 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.724986 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.725010 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.725028 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:21Z","lastTransitionTime":"2025-11-26T05:26:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.827875 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.827961 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.827994 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.828027 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.828048 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:21Z","lastTransitionTime":"2025-11-26T05:26:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.932079 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.932157 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.932177 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.932203 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:21 crc kubenswrapper[4871]: I1126 05:26:21.932222 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:21Z","lastTransitionTime":"2025-11-26T05:26:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.035233 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.035302 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.035325 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.035373 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.035392 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:22Z","lastTransitionTime":"2025-11-26T05:26:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.138134 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.138506 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.138767 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.138931 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.139065 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:22Z","lastTransitionTime":"2025-11-26T05:26:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.242808 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.243113 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.243286 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.243423 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.243744 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:22Z","lastTransitionTime":"2025-11-26T05:26:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.347125 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.347206 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.347226 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.347250 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.347268 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:22Z","lastTransitionTime":"2025-11-26T05:26:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.451766 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.451838 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.451864 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.451893 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.451913 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:22Z","lastTransitionTime":"2025-11-26T05:26:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.506895 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.506954 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.506922 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:26:22 crc kubenswrapper[4871]: E1126 05:26:22.507139 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.507156 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:22 crc kubenswrapper[4871]: E1126 05:26:22.507288 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:22 crc kubenswrapper[4871]: E1126 05:26:22.508006 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:22 crc kubenswrapper[4871]: E1126 05:26:22.508216 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.530707 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.551134 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.555418 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.555669 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.555813 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.556012 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.556209 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:22Z","lastTransitionTime":"2025-11-26T05:26:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.581152 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"contai
nerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' 
detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc 
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.603049 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.637816 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"ePort{ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{0 10257 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{kube-controller-manager: true,},ClusterIP:10.217.4.36,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.36],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 05:26:14.951497 6305 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1126 05:26:14.951501 6305 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951510 6305 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951562 6305 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nF1126 05:26:14.951571 6305 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.654619 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.658606 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.658788 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.658913 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.659042 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.659161 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:22Z","lastTransitionTime":"2025-11-26T05:26:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.670825 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.689070 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.703842 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.719920 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.731378 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.751511 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.761490 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.761554 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:22 crc 
kubenswrapper[4871]: I1126 05:26:22.761567 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.761586 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.761609 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:22Z","lastTransitionTime":"2025-11-26T05:26:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.769204 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"con
tainerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.784659 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.799614 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.815660 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.864459 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.864515 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.864561 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.864585 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.864603 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:22Z","lastTransitionTime":"2025-11-26T05:26:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.967615 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.967659 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.967675 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.967724 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:22 crc kubenswrapper[4871]: I1126 05:26:22.967740 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:22Z","lastTransitionTime":"2025-11-26T05:26:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.070646 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.070699 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.070711 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.070728 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.070739 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:23Z","lastTransitionTime":"2025-11-26T05:26:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.173808 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.173873 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.173897 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.173933 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.173959 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:23Z","lastTransitionTime":"2025-11-26T05:26:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.278600 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.278672 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.278695 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.278726 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.278747 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:23Z","lastTransitionTime":"2025-11-26T05:26:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.381289 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.381356 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.381374 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.381400 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.381418 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:23Z","lastTransitionTime":"2025-11-26T05:26:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.484615 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.484680 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.484702 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.484732 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.484754 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:23Z","lastTransitionTime":"2025-11-26T05:26:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.588326 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.588387 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.588405 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.588431 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.588449 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:23Z","lastTransitionTime":"2025-11-26T05:26:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.693286 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.693362 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.693390 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.693425 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.693449 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:23Z","lastTransitionTime":"2025-11-26T05:26:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.796432 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.796500 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.796516 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.796667 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.796707 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:23Z","lastTransitionTime":"2025-11-26T05:26:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.900211 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.900712 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.900890 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.901118 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:23 crc kubenswrapper[4871]: I1126 05:26:23.901291 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:23Z","lastTransitionTime":"2025-11-26T05:26:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.004424 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.004509 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.004521 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.004547 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.004556 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:24Z","lastTransitionTime":"2025-11-26T05:26:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.109020 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.109342 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.109371 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.109400 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.109431 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:24Z","lastTransitionTime":"2025-11-26T05:26:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.212798 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.212872 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.212894 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.212924 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.212948 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:24Z","lastTransitionTime":"2025-11-26T05:26:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.316069 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.316133 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.316157 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.316186 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.316217 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:24Z","lastTransitionTime":"2025-11-26T05:26:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.419587 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.419654 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.419672 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.419695 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.419712 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:24Z","lastTransitionTime":"2025-11-26T05:26:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.507208 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.507294 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.507302 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.507395 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:26:24 crc kubenswrapper[4871]: E1126 05:26:24.507399 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:26:24 crc kubenswrapper[4871]: E1126 05:26:24.507473 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:26:24 crc kubenswrapper[4871]: E1126 05:26:24.507567 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:26:24 crc kubenswrapper[4871]: E1126 05:26:24.507648 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.521773 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.521835 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.521857 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.521884 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.521908 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:24Z","lastTransitionTime":"2025-11-26T05:26:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.590211 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:26:24 crc kubenswrapper[4871]: E1126 05:26:24.590397 4871 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 05:26:24 crc kubenswrapper[4871]: E1126 05:26:24.590470 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs podName:30b3c82b-ca2a-4821-86e0-94aa2afce847 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:32.590449652 +0000 UTC m=+50.773501268 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs") pod "network-metrics-daemon-z2d5h" (UID: "30b3c82b-ca2a-4821-86e0-94aa2afce847") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.624427 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.624492 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.624515 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.624576 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.624601 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:24Z","lastTransitionTime":"2025-11-26T05:26:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.727923 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.727970 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.727988 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.728032 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.728050 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:24Z","lastTransitionTime":"2025-11-26T05:26:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.830518 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.830819 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.830836 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.830862 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.830890 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:24Z","lastTransitionTime":"2025-11-26T05:26:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.933448 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.933508 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.933558 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.933625 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:24 crc kubenswrapper[4871]: I1126 05:26:24.933648 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:24Z","lastTransitionTime":"2025-11-26T05:26:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.036517 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.036668 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.036695 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.036725 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.036745 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:25Z","lastTransitionTime":"2025-11-26T05:26:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.140315 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.140379 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.140396 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.140422 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.140439 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:25Z","lastTransitionTime":"2025-11-26T05:26:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.243997 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.244060 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.244077 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.244101 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.244118 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:25Z","lastTransitionTime":"2025-11-26T05:26:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.348391 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.348455 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.348473 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.348497 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.348515 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:25Z","lastTransitionTime":"2025-11-26T05:26:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.451102 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.451188 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.451212 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.451246 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.451277 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:25Z","lastTransitionTime":"2025-11-26T05:26:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.554006 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.554081 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.554098 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.554123 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.554141 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:25Z","lastTransitionTime":"2025-11-26T05:26:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.657436 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.657473 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.657484 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.657499 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.657509 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:25Z","lastTransitionTime":"2025-11-26T05:26:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.760637 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.760702 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.760719 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.760743 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.760759 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:25Z","lastTransitionTime":"2025-11-26T05:26:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.863566 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.863642 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.863667 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.863701 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.863726 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:25Z","lastTransitionTime":"2025-11-26T05:26:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.966121 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.966273 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.966317 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.966358 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:25 crc kubenswrapper[4871]: I1126 05:26:25.966381 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:25Z","lastTransitionTime":"2025-11-26T05:26:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.068403 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.068451 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.068464 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.068482 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.068494 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:26Z","lastTransitionTime":"2025-11-26T05:26:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.171417 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.171521 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.171570 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.171602 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.171620 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:26Z","lastTransitionTime":"2025-11-26T05:26:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.274132 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.274509 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.274613 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.274641 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.274658 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:26Z","lastTransitionTime":"2025-11-26T05:26:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.377394 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.377461 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.377483 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.377516 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.377635 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:26Z","lastTransitionTime":"2025-11-26T05:26:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.480970 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.481058 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.481086 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.481118 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.481143 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:26Z","lastTransitionTime":"2025-11-26T05:26:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.506681 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.506727 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:26:26 crc kubenswrapper[4871]: E1126 05:26:26.506906 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.506986 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.507005 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:26:26 crc kubenswrapper[4871]: E1126 05:26:26.507141 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:26:26 crc kubenswrapper[4871]: E1126 05:26:26.507285 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:26:26 crc kubenswrapper[4871]: E1126 05:26:26.507840 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.584114 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.584195 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.584214 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.584709 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.584771 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:26Z","lastTransitionTime":"2025-11-26T05:26:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.688275 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.688329 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.688347 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.688386 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.688404 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:26Z","lastTransitionTime":"2025-11-26T05:26:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.791609 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.791666 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.791682 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.791706 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.791723 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:26Z","lastTransitionTime":"2025-11-26T05:26:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.894921 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.894973 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.894990 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.895013 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.895032 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:26Z","lastTransitionTime":"2025-11-26T05:26:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.998153 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.998229 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.998254 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.998282 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:26 crc kubenswrapper[4871]: I1126 05:26:26.998303 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:26Z","lastTransitionTime":"2025-11-26T05:26:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.100997 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.101056 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.101066 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.101091 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.101111 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:27Z","lastTransitionTime":"2025-11-26T05:26:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.203831 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.203884 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.203904 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.203946 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.203976 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:27Z","lastTransitionTime":"2025-11-26T05:26:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.306862 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.306940 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.306966 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.306995 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.307016 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:27Z","lastTransitionTime":"2025-11-26T05:26:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.410854 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.410916 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.410933 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.410957 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.410975 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:27Z","lastTransitionTime":"2025-11-26T05:26:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.513857 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.513913 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.513939 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.513966 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.513990 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:27Z","lastTransitionTime":"2025-11-26T05:26:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.615685 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.615720 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.615728 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.615741 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.615750 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:27Z","lastTransitionTime":"2025-11-26T05:26:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.719025 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.719069 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.719080 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.719097 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.719109 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:27Z","lastTransitionTime":"2025-11-26T05:26:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.821827 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.821891 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.821903 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.821918 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.821929 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:27Z","lastTransitionTime":"2025-11-26T05:26:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.924667 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.924727 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.924744 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.924769 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:27 crc kubenswrapper[4871]: I1126 05:26:27.924786 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:27Z","lastTransitionTime":"2025-11-26T05:26:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.027508 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.027596 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.027607 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.027622 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.027634 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:28Z","lastTransitionTime":"2025-11-26T05:26:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.130279 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.130344 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.130368 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.130401 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.130421 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:28Z","lastTransitionTime":"2025-11-26T05:26:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.233214 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.233276 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.233296 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.233320 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.233338 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:28Z","lastTransitionTime":"2025-11-26T05:26:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.336163 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.336210 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.336227 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.336248 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.336264 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:28Z","lastTransitionTime":"2025-11-26T05:26:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.440019 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.440083 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.440106 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.440134 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.440155 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:28Z","lastTransitionTime":"2025-11-26T05:26:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.506928 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.506927 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.507059 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:26:28 crc kubenswrapper[4871]: E1126 05:26:28.507227 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.507253 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:26:28 crc kubenswrapper[4871]: E1126 05:26:28.507400 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:26:28 crc kubenswrapper[4871]: E1126 05:26:28.507598 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:26:28 crc kubenswrapper[4871]: E1126 05:26:28.507719 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.544016 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.544069 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.544087 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.544111 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.544130 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:28Z","lastTransitionTime":"2025-11-26T05:26:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.647463 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.647638 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.647702 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.647735 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.647760 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:28Z","lastTransitionTime":"2025-11-26T05:26:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.751072 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.751120 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.751137 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.751158 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.751177 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:28Z","lastTransitionTime":"2025-11-26T05:26:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.853882 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.853944 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.853961 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.853985 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.854002 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:28Z","lastTransitionTime":"2025-11-26T05:26:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.956963 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.957038 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.957059 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.957084 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:28 crc kubenswrapper[4871]: I1126 05:26:28.957101 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:28Z","lastTransitionTime":"2025-11-26T05:26:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.060390 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.060437 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.060456 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.060479 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.060497 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.183034 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.183066 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.183076 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.183092 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.183103 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.186644 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.186678 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.186689 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.186705 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.186715 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:29 crc kubenswrapper[4871]: E1126 05:26:29.210388 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:29Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.214646 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.214721 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.214738 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.214759 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.214776 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:29 crc kubenswrapper[4871]: E1126 05:26:29.229010 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:29Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.232107 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.232152 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.232164 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.232182 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.232196 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:29 crc kubenswrapper[4871]: E1126 05:26:29.245963 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:29Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.249479 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.249511 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.249538 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.249565 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.249576 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:29 crc kubenswrapper[4871]: E1126 05:26:29.267564 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:29Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.271802 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.271847 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.271866 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.271884 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.271896 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:29 crc kubenswrapper[4871]: E1126 05:26:29.287004 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:29Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:29 crc kubenswrapper[4871]: E1126 05:26:29.287159 4871 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.289024 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.289081 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.289093 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.289111 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.289123 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.391685 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.391752 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.391776 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.391807 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.391829 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.495079 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.495138 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.495161 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.495190 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.495212 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.598222 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.598317 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.598340 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.598370 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.598393 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.700913 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.700981 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.701005 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.701030 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.701047 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.804170 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.804221 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.804237 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.804260 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.804276 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.906742 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.906780 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.906791 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.906805 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:29 crc kubenswrapper[4871]: I1126 05:26:29.906816 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:29Z","lastTransitionTime":"2025-11-26T05:26:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.010131 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.010191 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.010213 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.010242 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.010264 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:30Z","lastTransitionTime":"2025-11-26T05:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.113438 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.113497 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.113514 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.113566 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.113584 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:30Z","lastTransitionTime":"2025-11-26T05:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.216958 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.217019 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.217037 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.217061 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.217078 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:30Z","lastTransitionTime":"2025-11-26T05:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.320457 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.320592 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.320617 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.320650 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.320670 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:30Z","lastTransitionTime":"2025-11-26T05:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.423863 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.423922 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.423939 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.423965 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.423983 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:30Z","lastTransitionTime":"2025-11-26T05:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.506401 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.506401 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.506407 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.506598 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:30 crc kubenswrapper[4871]: E1126 05:26:30.506702 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:30 crc kubenswrapper[4871]: E1126 05:26:30.506867 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:30 crc kubenswrapper[4871]: E1126 05:26:30.506979 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:30 crc kubenswrapper[4871]: E1126 05:26:30.507631 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.508229 4871 scope.go:117] "RemoveContainer" containerID="ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.530368 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.530434 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.530455 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.530482 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.530504 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:30Z","lastTransitionTime":"2025-11-26T05:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.632671 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.633029 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.633201 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.633350 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.633505 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:30Z","lastTransitionTime":"2025-11-26T05:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.737804 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.737857 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.737877 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.737899 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.737915 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:30Z","lastTransitionTime":"2025-11-26T05:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.841393 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.841475 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.841501 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.841559 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.841596 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:30Z","lastTransitionTime":"2025-11-26T05:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.921000 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/1.log" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.925917 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.926738 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.943682 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.943727 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.943745 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.943769 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.943786 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:30Z","lastTransitionTime":"2025-11-26T05:26:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.961589 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"ePort{ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{0 10257 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{kube-controller-manager: true,},ClusterIP:10.217.4.36,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.36],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 05:26:14.951497 6305 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1126 05:26:14.951501 6305 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951510 6305 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951562 6305 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nF1126 05:26:14.951571 6305 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:30Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:30 crc kubenswrapper[4871]: I1126 05:26:30.979875 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:30Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.012112 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.036968 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.045593 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.045629 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.045642 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.045657 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.045668 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:31Z","lastTransitionTime":"2025-11-26T05:26:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.050090 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.061026 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.079584 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.088998 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 
05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.098821 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.109328 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.120947 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.132458 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.142957 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.147304 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.147364 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.147382 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.147402 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.147420 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:31Z","lastTransitionTime":"2025-11-26T05:26:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.156240 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.171644 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.182770 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.249986 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.250027 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.250036 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.250052 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.250062 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:31Z","lastTransitionTime":"2025-11-26T05:26:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.352047 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.352104 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.352125 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.352149 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.352167 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:31Z","lastTransitionTime":"2025-11-26T05:26:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.455331 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.455384 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.455400 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.455429 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.455445 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:31Z","lastTransitionTime":"2025-11-26T05:26:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.558712 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.558788 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.558811 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.558840 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.558862 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:31Z","lastTransitionTime":"2025-11-26T05:26:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.665730 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.665863 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.665925 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.665963 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.666062 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:31Z","lastTransitionTime":"2025-11-26T05:26:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.769210 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.769270 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.769286 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.769309 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.769326 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:31Z","lastTransitionTime":"2025-11-26T05:26:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.872828 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.872929 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.872966 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.873001 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.873023 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:31Z","lastTransitionTime":"2025-11-26T05:26:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.933283 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/2.log" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.934457 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/1.log" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.938723 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f" exitCode=1 Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.938786 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f"} Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.938847 4871 scope.go:117] "RemoveContainer" containerID="ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.940010 4871 scope.go:117] "RemoveContainer" containerID="e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f" Nov 26 05:26:31 crc kubenswrapper[4871]: E1126 05:26:31.940343 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.960925 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.974710 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.974795 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.974814 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.974835 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.974849 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:31Z","lastTransitionTime":"2025-11-26T05:26:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:31 crc kubenswrapper[4871]: I1126 05:26:31.983839 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.007014 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.039811 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7
e1312016ca66c6ab69b4b08f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"ePort{ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{0 10257 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{kube-controller-manager: true,},ClusterIP:10.217.4.36,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.36],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 05:26:14.951497 6305 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1126 05:26:14.951501 6305 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951510 6305 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951562 6305 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nF1126 05:26:14.951571 6305 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:31Z\\\",\\\"message\\\":\\\"er during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z]\\\\nI1126 05:26:31.460634 6510 services_controller.go:453] Built service openshift-apiserver/check-endpoints template LB for network=default: []services.LB{}\\\\nI1126 05:26:31.460771 6510 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver/apiserver_TCP_cluster\\\\\\\", UUID:\\\\\\\"d71b38eb-32af-4c0f-9490-7c317c111e3a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver/apiserver\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]st\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]
}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.056273 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.078256 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.078328 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.078345 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.078837 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.078891 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:32Z","lastTransitionTime":"2025-11-26T05:26:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.080100 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5
ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.099005 4871 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.118448 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.139334 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.156941 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.176187 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.184616 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.184703 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.184727 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.184757 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.184781 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:32Z","lastTransitionTime":"2025-11-26T05:26:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.197499 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.217102 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.234205 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.255987 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.273744 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.288096 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.288163 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.288187 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.288231 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.288255 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:32Z","lastTransitionTime":"2025-11-26T05:26:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.390499 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.390565 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.390581 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.390600 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.390614 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:32Z","lastTransitionTime":"2025-11-26T05:26:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.492825 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.492874 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.492891 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.492911 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.492927 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:32Z","lastTransitionTime":"2025-11-26T05:26:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.506329 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.506377 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.506435 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:32 crc kubenswrapper[4871]: E1126 05:26:32.506479 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:32 crc kubenswrapper[4871]: E1126 05:26:32.506739 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:32 crc kubenswrapper[4871]: E1126 05:26:32.507761 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.507862 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:32 crc kubenswrapper[4871]: E1126 05:26:32.508002 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.528092 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.549665 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.568666 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.590839 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.602940 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.603124 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.603146 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.603174 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.603192 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:32Z","lastTransitionTime":"2025-11-26T05:26:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.614334 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.633434 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.657284 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f894
5c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.678717 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.683285 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:32 crc kubenswrapper[4871]: E1126 05:26:32.683902 4871 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 05:26:32 crc kubenswrapper[4871]: E1126 05:26:32.684097 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs podName:30b3c82b-ca2a-4821-86e0-94aa2afce847 nodeName:}" failed. No retries permitted until 2025-11-26 05:26:48.6840593 +0000 UTC m=+66.867110926 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs") pod "network-metrics-daemon-z2d5h" (UID: "30b3c82b-ca2a-4821-86e0-94aa2afce847") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.706338 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.706391 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.706403 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.706425 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.706439 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:32Z","lastTransitionTime":"2025-11-26T05:26:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.713736 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7
e1312016ca66c6ab69b4b08f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ec8037aad965400b39cabe8405aeca30ace7393c276be1723f13cf348923f6e3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"message\\\":\\\"ePort{ServicePort{Name:https,Protocol:TCP,Port:443,TargetPort:{0 10257 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{kube-controller-manager: true,},ClusterIP:10.217.4.36,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.36],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1126 05:26:14.951497 6305 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1126 05:26:14.951501 6305 obj_retry.go:303] Retry object setup: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951510 6305 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1126 05:26:14.951562 6305 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nF1126 05:26:14.951571 6305 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:31Z\\\",\\\"message\\\":\\\"er during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z]\\\\nI1126 05:26:31.460634 6510 services_controller.go:453] Built service openshift-apiserver/check-endpoints template LB for network=default: []services.LB{}\\\\nI1126 05:26:31.460771 6510 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver/apiserver_TCP_cluster\\\\\\\", UUID:\\\\\\\"d71b38eb-32af-4c0f-9490-7c317c111e3a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver/apiserver\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]st\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]
}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.728666 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.747951 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.763796 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.780884 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.795015 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.808918 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.808948 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.808959 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.808976 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.808986 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:32Z","lastTransitionTime":"2025-11-26T05:26:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.817578 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0
009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.834814 4871 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.911372 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.911407 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.911419 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.911435 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.911446 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:32Z","lastTransitionTime":"2025-11-26T05:26:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.944397 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/2.log"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.950660 4871 scope.go:117] "RemoveContainer" containerID="e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f"
Nov 26 05:26:32 crc kubenswrapper[4871]: E1126 05:26:32.950880 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8"
Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.971463 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:32 crc kubenswrapper[4871]: I1126 05:26:32.989975 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:32Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.008778 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.013407 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.013444 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.013458 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.013477 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.013491 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:33Z","lastTransitionTime":"2025-11-26T05:26:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.023815 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.043284 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.062160 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.085168 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\
\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.107260 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.116295 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.116497 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.116662 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.116781 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.116890 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:33Z","lastTransitionTime":"2025-11-26T05:26:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.139390 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:31Z\\\",\\\"message\\\":\\\"er during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z]\\\\nI1126 05:26:31.460634 6510 services_controller.go:453] Built service openshift-apiserver/check-endpoints template LB for network=default: []services.LB{}\\\\nI1126 05:26:31.460771 6510 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver/apiserver_TCP_cluster\\\\\\\", UUID:\\\\\\\"d71b38eb-32af-4c0f-9490-7c317c111e3a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver/apiserver\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, 
Routers:[]st\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.154634 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.170677 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 
05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.189631 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.209132 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.220409 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.220519 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.220612 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.220703 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.220732 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:33Z","lastTransitionTime":"2025-11-26T05:26:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.223438 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.240038 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.262191 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:33Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.323717 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.323792 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:33 crc 
kubenswrapper[4871]: I1126 05:26:33.323812 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.323840 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.323858 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:33Z","lastTransitionTime":"2025-11-26T05:26:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.427330 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.427387 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.427412 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.427442 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.427465 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:33Z","lastTransitionTime":"2025-11-26T05:26:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.530377 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.530443 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.530460 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.530487 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.530505 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:33Z","lastTransitionTime":"2025-11-26T05:26:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.634892 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.634960 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.634979 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.635007 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.635026 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:33Z","lastTransitionTime":"2025-11-26T05:26:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.738024 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.738353 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.738553 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.738857 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.739041 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:33Z","lastTransitionTime":"2025-11-26T05:26:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.841980 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.842308 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.842483 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.842707 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.842938 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:33Z","lastTransitionTime":"2025-11-26T05:26:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.946204 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.946255 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.946266 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.946281 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:33 crc kubenswrapper[4871]: I1126 05:26:33.946291 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:33Z","lastTransitionTime":"2025-11-26T05:26:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.048937 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.048983 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.049001 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.049029 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.049051 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:34Z","lastTransitionTime":"2025-11-26T05:26:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.151559 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.151594 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.151605 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.151618 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.151627 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:34Z","lastTransitionTime":"2025-11-26T05:26:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.201689 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.201763 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.201958 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.201986 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.202005 4871 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.202067 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 05:27:06.202047307 +0000 UTC m=+84.385098933 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.202594 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.202620 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.202635 4871 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.202683 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 05:27:06.202667362 +0000 UTC m=+84.385718978 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.254576 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.254608 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.254621 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.254638 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.254649 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:34Z","lastTransitionTime":"2025-11-26T05:26:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.302387 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.302726 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:27:06.302687815 +0000 UTC m=+84.485739441 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.358147 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.358212 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.358230 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.358254 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.358276 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:34Z","lastTransitionTime":"2025-11-26T05:26:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.404146 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.404313 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.404404 4871 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.404536 4871 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.404555 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:27:06.40449717 +0000 UTC m=+84.587548796 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.404699 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:27:06.404619183 +0000 UTC m=+84.587670839 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.461574 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.461633 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.461698 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.461728 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.461750 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:34Z","lastTransitionTime":"2025-11-26T05:26:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.506672 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.506778 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.506869 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.506882 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.506944 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.507162 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.507306 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:34 crc kubenswrapper[4871]: E1126 05:26:34.507453 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.565075 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.565150 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.565175 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.565205 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.565227 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:34Z","lastTransitionTime":"2025-11-26T05:26:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.668446 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.668507 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.668548 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.668574 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.668596 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:34Z","lastTransitionTime":"2025-11-26T05:26:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.771176 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.771229 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.771240 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.771261 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.771272 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:34Z","lastTransitionTime":"2025-11-26T05:26:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.873856 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.873885 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.873893 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.873907 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.873915 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:34Z","lastTransitionTime":"2025-11-26T05:26:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.976899 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.976966 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.976989 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.977019 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:34 crc kubenswrapper[4871]: I1126 05:26:34.977041 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:34Z","lastTransitionTime":"2025-11-26T05:26:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.080575 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.080636 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.080657 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.080681 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.080698 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:35Z","lastTransitionTime":"2025-11-26T05:26:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.183689 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.183749 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.183767 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.183792 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.183808 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:35Z","lastTransitionTime":"2025-11-26T05:26:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.286823 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.286897 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.286915 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.286941 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.286962 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:35Z","lastTransitionTime":"2025-11-26T05:26:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.390166 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.390223 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.390241 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.390264 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.390281 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:35Z","lastTransitionTime":"2025-11-26T05:26:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.492791 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.492853 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.492871 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.492896 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.492915 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:35Z","lastTransitionTime":"2025-11-26T05:26:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.596090 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.596164 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.596186 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.596213 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.596234 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:35Z","lastTransitionTime":"2025-11-26T05:26:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.699102 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.699158 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.699168 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.699181 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.699190 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:35Z","lastTransitionTime":"2025-11-26T05:26:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.801917 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.801985 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.802002 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.802030 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.802047 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:35Z","lastTransitionTime":"2025-11-26T05:26:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.905709 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.905789 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.905812 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.905842 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:35 crc kubenswrapper[4871]: I1126 05:26:35.905860 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:35Z","lastTransitionTime":"2025-11-26T05:26:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.009728 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.009792 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.009809 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.009835 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.009855 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:36Z","lastTransitionTime":"2025-11-26T05:26:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.114722 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.114811 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.114829 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.114853 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.114900 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:36Z","lastTransitionTime":"2025-11-26T05:26:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.218133 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.218192 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.218209 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.218235 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.218254 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:36Z","lastTransitionTime":"2025-11-26T05:26:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.313093 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.322271 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.322349 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.322368 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.322394 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.322410 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:36Z","lastTransitionTime":"2025-11-26T05:26:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.328120 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.333150 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.355134 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.377428 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.408254 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7
e1312016ca66c6ab69b4b08f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:31Z\\\",\\\"message\\\":\\\"er during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z]\\\\nI1126 05:26:31.460634 6510 services_controller.go:453] Built service openshift-apiserver/check-endpoints template LB for network=default: []services.LB{}\\\\nI1126 05:26:31.460771 6510 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver/apiserver_TCP_cluster\\\\\\\", UUID:\\\\\\\"d71b38eb-32af-4c0f-9490-7c317c111e3a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver/apiserver\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]st\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.425986 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.426072 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.426092 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.426115 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.426161 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:36Z","lastTransitionTime":"2025-11-26T05:26:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.426703 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.450434 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.469225 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 
05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.492487 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.507062 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.507188 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:36 crc kubenswrapper[4871]: E1126 05:26:36.507224 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:36 crc kubenswrapper[4871]: E1126 05:26:36.507391 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.507910 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.508025 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:36 crc kubenswrapper[4871]: E1126 05:26:36.508493 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:36 crc kubenswrapper[4871]: E1126 05:26:36.508701 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.513756 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.529291 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.529392 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.529408 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.529429 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.529445 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:36Z","lastTransitionTime":"2025-11-26T05:26:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.532344 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.554052 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.573268 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.591637 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.606795 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.626364 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.632023 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.632065 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.632082 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.632106 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.632123 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:36Z","lastTransitionTime":"2025-11-26T05:26:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.643809 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:36Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.735690 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.735762 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.735787 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.735815 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.735909 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:36Z","lastTransitionTime":"2025-11-26T05:26:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.839868 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.840243 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.840391 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.840570 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.840732 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:36Z","lastTransitionTime":"2025-11-26T05:26:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.945107 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.945576 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.945807 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.945991 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:36 crc kubenswrapper[4871]: I1126 05:26:36.946254 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:36Z","lastTransitionTime":"2025-11-26T05:26:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.049620 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.049681 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.049705 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.049734 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.049757 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:37Z","lastTransitionTime":"2025-11-26T05:26:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.153232 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.153649 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.153814 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.153960 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.154092 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:37Z","lastTransitionTime":"2025-11-26T05:26:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.257643 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.257709 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.257728 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.257753 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.257771 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:37Z","lastTransitionTime":"2025-11-26T05:26:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.360034 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.360421 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.360624 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.360794 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.360925 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:37Z","lastTransitionTime":"2025-11-26T05:26:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.464681 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.464765 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.464782 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.464834 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.464851 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:37Z","lastTransitionTime":"2025-11-26T05:26:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.567662 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.567731 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.567754 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.567777 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.567795 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:37Z","lastTransitionTime":"2025-11-26T05:26:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.670621 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.671126 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.671318 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.671501 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.671730 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:37Z","lastTransitionTime":"2025-11-26T05:26:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.775264 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.775329 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.775347 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.775371 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.775391 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:37Z","lastTransitionTime":"2025-11-26T05:26:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.877893 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.877971 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.877992 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.878017 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.878036 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:37Z","lastTransitionTime":"2025-11-26T05:26:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.981108 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.981179 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.981201 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.981230 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:37 crc kubenswrapper[4871]: I1126 05:26:37.981264 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:37Z","lastTransitionTime":"2025-11-26T05:26:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.084878 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.084961 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.084988 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.085019 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.085042 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:38Z","lastTransitionTime":"2025-11-26T05:26:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.187927 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.187983 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.188003 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.188033 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.188056 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:38Z","lastTransitionTime":"2025-11-26T05:26:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.291290 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.291362 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.291381 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.291402 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.291419 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:38Z","lastTransitionTime":"2025-11-26T05:26:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.394504 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.394566 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.394578 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.394598 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.394615 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:38Z","lastTransitionTime":"2025-11-26T05:26:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.497812 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.497851 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.497859 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.497875 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.497884 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:38Z","lastTransitionTime":"2025-11-26T05:26:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.507212 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.507227 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.507250 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:38 crc kubenswrapper[4871]: E1126 05:26:38.507319 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.507341 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:38 crc kubenswrapper[4871]: E1126 05:26:38.507455 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:38 crc kubenswrapper[4871]: E1126 05:26:38.507627 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:38 crc kubenswrapper[4871]: E1126 05:26:38.507763 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.600948 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.601236 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.601333 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.601406 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.601477 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:38Z","lastTransitionTime":"2025-11-26T05:26:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.703979 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.704224 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.704289 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.704368 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.704433 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:38Z","lastTransitionTime":"2025-11-26T05:26:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.806821 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.806885 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.806903 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.806930 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.806949 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:38Z","lastTransitionTime":"2025-11-26T05:26:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.909908 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.910348 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.910769 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.910965 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:38 crc kubenswrapper[4871]: I1126 05:26:38.911094 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:38Z","lastTransitionTime":"2025-11-26T05:26:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.013627 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.013688 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.013707 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.013733 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.013750 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.121794 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.121882 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.121908 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.121941 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.121963 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.224369 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.224407 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.224418 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.224433 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.224443 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.327122 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.327164 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.327174 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.327190 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.327199 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.429268 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.429717 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.429736 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.429755 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.429769 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.464342 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.464643 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.464795 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.464921 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.465053 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: E1126 05:26:39.485684 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:39Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.491034 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.491086 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.491107 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.491135 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.491157 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: E1126 05:26:39.513285 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:39Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.519714 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.519774 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.519792 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.519816 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.519834 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: E1126 05:26:39.540131 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:39Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.545110 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.545214 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.545267 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.545296 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.545314 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: E1126 05:26:39.567431 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:39Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.573623 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.573687 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.573710 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.573740 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.573766 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: E1126 05:26:39.593734 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:39Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:39 crc kubenswrapper[4871]: E1126 05:26:39.593954 4871 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.596321 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.596388 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.596406 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.596432 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.596450 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.699667 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.699727 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.699758 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.699784 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.699800 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.803015 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.803114 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.803142 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.803177 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.803199 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.908041 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.908102 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.908119 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.908146 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:39 crc kubenswrapper[4871]: I1126 05:26:39.908167 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:39Z","lastTransitionTime":"2025-11-26T05:26:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.011587 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.011661 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.011685 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.011717 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.011739 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:40Z","lastTransitionTime":"2025-11-26T05:26:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.114891 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.114973 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.114995 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.115027 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.115053 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:40Z","lastTransitionTime":"2025-11-26T05:26:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.218277 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.218341 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.218359 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.218386 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.218409 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:40Z","lastTransitionTime":"2025-11-26T05:26:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.322063 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.322195 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.322259 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.322290 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.322347 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:40Z","lastTransitionTime":"2025-11-26T05:26:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.426022 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.426088 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.426105 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.426130 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.426147 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:40Z","lastTransitionTime":"2025-11-26T05:26:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.506476 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.506565 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.506624 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.506675 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:40 crc kubenswrapper[4871]: E1126 05:26:40.506914 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:40 crc kubenswrapper[4871]: E1126 05:26:40.507102 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:40 crc kubenswrapper[4871]: E1126 05:26:40.507233 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:40 crc kubenswrapper[4871]: E1126 05:26:40.507483 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.529233 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.529289 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.529306 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.529332 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.529352 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:40Z","lastTransitionTime":"2025-11-26T05:26:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.632895 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.632961 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.632981 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.633009 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.633026 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:40Z","lastTransitionTime":"2025-11-26T05:26:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.737362 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.737402 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.737413 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.737428 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.737445 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:40Z","lastTransitionTime":"2025-11-26T05:26:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.841424 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.841501 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.841559 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.841595 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.841616 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:40Z","lastTransitionTime":"2025-11-26T05:26:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.945584 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.945666 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.945682 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.945703 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:40 crc kubenswrapper[4871]: I1126 05:26:40.945718 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:40Z","lastTransitionTime":"2025-11-26T05:26:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.048945 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.049007 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.049027 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.049056 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.049074 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:41Z","lastTransitionTime":"2025-11-26T05:26:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.152604 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.152670 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.152692 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.152721 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.152743 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:41Z","lastTransitionTime":"2025-11-26T05:26:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.256333 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.256424 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.256445 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.256470 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.256487 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:41Z","lastTransitionTime":"2025-11-26T05:26:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.359461 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.359503 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.359514 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.359547 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.359560 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:41Z","lastTransitionTime":"2025-11-26T05:26:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.462878 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.462963 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.462987 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.463017 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.463039 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:41Z","lastTransitionTime":"2025-11-26T05:26:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.565295 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.565354 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.565371 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.565395 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.565412 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:41Z","lastTransitionTime":"2025-11-26T05:26:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.668108 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.668201 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.668220 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.668242 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.668260 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:41Z","lastTransitionTime":"2025-11-26T05:26:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.771099 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.771176 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.771197 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.771221 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.771238 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:41Z","lastTransitionTime":"2025-11-26T05:26:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.874245 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.874303 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.874323 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.874351 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.874374 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:41Z","lastTransitionTime":"2025-11-26T05:26:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.977564 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.977637 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.977658 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.977704 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:41 crc kubenswrapper[4871]: I1126 05:26:41.977738 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:41Z","lastTransitionTime":"2025-11-26T05:26:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.080620 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.080751 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.080776 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.080807 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.080830 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:42Z","lastTransitionTime":"2025-11-26T05:26:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.183989 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.184049 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.184068 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.184091 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.184107 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:42Z","lastTransitionTime":"2025-11-26T05:26:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.287492 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.287598 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.287624 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.287654 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.287674 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:42Z","lastTransitionTime":"2025-11-26T05:26:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.390933 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.390988 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.391004 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.391030 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.391047 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:42Z","lastTransitionTime":"2025-11-26T05:26:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.494077 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.494139 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.494150 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.494171 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.494183 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:42Z","lastTransitionTime":"2025-11-26T05:26:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.506516 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.506590 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.506686 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.506777 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:42 crc kubenswrapper[4871]: E1126 05:26:42.506967 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:42 crc kubenswrapper[4871]: E1126 05:26:42.507152 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:42 crc kubenswrapper[4871]: E1126 05:26:42.507354 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:42 crc kubenswrapper[4871]: E1126 05:26:42.507510 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.540517 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.565060 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.596794 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7
e1312016ca66c6ab69b4b08f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:31Z\\\",\\\"message\\\":\\\"er during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z]\\\\nI1126 05:26:31.460634 6510 services_controller.go:453] Built service openshift-apiserver/check-endpoints template LB for network=default: []services.LB{}\\\\nI1126 05:26:31.460771 6510 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver/apiserver_TCP_cluster\\\\\\\", UUID:\\\\\\\"d71b38eb-32af-4c0f-9490-7c317c111e3a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver/apiserver\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]st\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.599260 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.599305 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.599324 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.599352 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.599372 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:42Z","lastTransitionTime":"2025-11-26T05:26:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.617809 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.637175 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 
05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.658798 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"012685e4-7f48-4dc5-8c32-b4acd0ba0788\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e936d7790749be736341822bb370fc8729d1e006bffe538ff480a090b856cce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1e537a2837f366cb6a6343ffdcf998611f07d8c19f4fe9c0111862520ebbe5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f55fc830fdd852727a8ac6714209b06ef8394a19d313752c316fd0901a47f2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.680655 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.700719 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.702213 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.702279 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.702303 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.702335 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.702361 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:42Z","lastTransitionTime":"2025-11-26T05:26:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.718488 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.732108 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.750278 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.769817 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.787347 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.801354 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.805802 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.805852 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.805869 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.805892 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.805910 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:42Z","lastTransitionTime":"2025-11-26T05:26:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.815326 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.832295 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.847452 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.908324 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.908406 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.908430 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.908460 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:42 crc kubenswrapper[4871]: I1126 05:26:42.908481 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:42Z","lastTransitionTime":"2025-11-26T05:26:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.010668 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.010728 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.010750 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.010777 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.010799 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:43Z","lastTransitionTime":"2025-11-26T05:26:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.113579 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.113670 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.113702 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.113731 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.113751 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:43Z","lastTransitionTime":"2025-11-26T05:26:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.216733 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.216782 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.216797 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.216816 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.216829 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:43Z","lastTransitionTime":"2025-11-26T05:26:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.321012 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.321074 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.321092 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.321119 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.321136 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:43Z","lastTransitionTime":"2025-11-26T05:26:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.424638 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.425012 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.425029 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.425056 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.425074 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:43Z","lastTransitionTime":"2025-11-26T05:26:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.527791 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.527858 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.527882 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.527909 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.527928 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:43Z","lastTransitionTime":"2025-11-26T05:26:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.630998 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.631090 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.631106 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.631129 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.631147 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:43Z","lastTransitionTime":"2025-11-26T05:26:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.733666 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.733719 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.733732 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.733751 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.733763 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:43Z","lastTransitionTime":"2025-11-26T05:26:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.836980 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.837056 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.837076 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.837104 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.837121 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:43Z","lastTransitionTime":"2025-11-26T05:26:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.939791 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.939836 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.939848 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.939866 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:43 crc kubenswrapper[4871]: I1126 05:26:43.939879 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:43Z","lastTransitionTime":"2025-11-26T05:26:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.042582 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.042642 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.042658 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.042675 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.042687 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:44Z","lastTransitionTime":"2025-11-26T05:26:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.145254 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.145315 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.145332 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.145356 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.145373 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:44Z","lastTransitionTime":"2025-11-26T05:26:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.247955 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.248019 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.248037 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.248062 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.248082 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:44Z","lastTransitionTime":"2025-11-26T05:26:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.350862 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.350924 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.350940 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.350960 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.350972 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:44Z","lastTransitionTime":"2025-11-26T05:26:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.453960 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.454020 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.454037 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.454061 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.454078 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:44Z","lastTransitionTime":"2025-11-26T05:26:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.506933 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.507008 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.507085 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:44 crc kubenswrapper[4871]: E1126 05:26:44.507280 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.507345 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:44 crc kubenswrapper[4871]: E1126 05:26:44.507609 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:44 crc kubenswrapper[4871]: E1126 05:26:44.507738 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:44 crc kubenswrapper[4871]: E1126 05:26:44.507867 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.556644 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.556706 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.556724 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.556748 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.556765 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:44Z","lastTransitionTime":"2025-11-26T05:26:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.659980 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.660042 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.660059 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.660085 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.660105 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:44Z","lastTransitionTime":"2025-11-26T05:26:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.763459 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.763837 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.764028 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.764254 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.764428 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:44Z","lastTransitionTime":"2025-11-26T05:26:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.867682 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.867724 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.867733 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.867748 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.867757 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:44Z","lastTransitionTime":"2025-11-26T05:26:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.971028 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.971070 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.971078 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.971093 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:44 crc kubenswrapper[4871]: I1126 05:26:44.971103 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:44Z","lastTransitionTime":"2025-11-26T05:26:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.074316 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.074379 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.074396 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.074420 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.074438 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:45Z","lastTransitionTime":"2025-11-26T05:26:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.176874 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.176953 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.176976 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.177009 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.177034 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:45Z","lastTransitionTime":"2025-11-26T05:26:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.280314 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.280393 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.280418 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.280449 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.280473 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:45Z","lastTransitionTime":"2025-11-26T05:26:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.383676 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.383729 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.383742 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.383759 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.383772 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:45Z","lastTransitionTime":"2025-11-26T05:26:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.487924 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.488010 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.488030 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.488056 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.488074 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:45Z","lastTransitionTime":"2025-11-26T05:26:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.591286 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.591354 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.591372 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.591398 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.591419 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:45Z","lastTransitionTime":"2025-11-26T05:26:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.695117 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.695201 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.695228 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.695259 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.695282 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:45Z","lastTransitionTime":"2025-11-26T05:26:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.798495 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.798593 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.798614 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.798637 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.798656 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:45Z","lastTransitionTime":"2025-11-26T05:26:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.901901 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.902009 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.902255 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.902662 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:45 crc kubenswrapper[4871]: I1126 05:26:45.902730 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:45Z","lastTransitionTime":"2025-11-26T05:26:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.005944 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.006014 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.006031 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.006052 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.006069 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:46Z","lastTransitionTime":"2025-11-26T05:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.108697 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.108742 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.108752 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.108766 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.108775 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:46Z","lastTransitionTime":"2025-11-26T05:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.211580 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.211622 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.211632 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.211646 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.211655 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:46Z","lastTransitionTime":"2025-11-26T05:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.314058 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.314094 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.314102 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.314114 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.314124 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:46Z","lastTransitionTime":"2025-11-26T05:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.417460 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.417557 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.417581 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.417608 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.417629 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:46Z","lastTransitionTime":"2025-11-26T05:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
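The kubelet_node_status.go:724 and setters.go:603 entries above repeat every ~100 ms because the node's Ready condition stays False while no CNI configuration exists. A minimal client-go sketch (illustrative only, not the kubelet's own code; it assumes a reachable kubeconfig at the default home path) that reads the same Ready condition for node "crc":

```go
// Sketch: query the Ready condition the setters.go entries keep re-asserting.
// Assumes a valid kubeconfig at the default location; "crc" is the node name
// taken from the log above.
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	node, err := client.CoreV1().Nodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range node.Status.Conditions {
		if c.Type == v1.NodeReady {
			// While CNI is missing, expect Status=False, Reason=KubeletNotReady.
			fmt.Printf("Ready=%s reason=%s message=%s\n", c.Status, c.Reason, c.Message)
		}
	}
}
```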
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.506993 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:26:46 crc kubenswrapper[4871]: E1126 05:26:46.507114 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.507007 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:26:46 crc kubenswrapper[4871]: E1126 05:26:46.507176 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.507014 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.506993 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:26:46 crc kubenswrapper[4871]: E1126 05:26:46.507216 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:26:46 crc kubenswrapper[4871]: E1126 05:26:46.507441 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.519809 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.519862 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.519878 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.519893 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.519902 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:46Z","lastTransitionTime":"2025-11-26T05:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.621999 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.622038 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.622048 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.622061 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.622072 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:46Z","lastTransitionTime":"2025-11-26T05:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.724349 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.724409 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.724428 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.724452 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.724471 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:46Z","lastTransitionTime":"2025-11-26T05:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.827250 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.827301 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.827312 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.827327 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.827338 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:46Z","lastTransitionTime":"2025-11-26T05:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.929298 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.929356 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.929371 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.929387 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:46 crc kubenswrapper[4871]: I1126 05:26:46.929399 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:46Z","lastTransitionTime":"2025-11-26T05:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.031548 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.031584 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.031594 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.031608 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.031618 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:47Z","lastTransitionTime":"2025-11-26T05:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.133067 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.133171 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.133202 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.133218 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.133229 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:47Z","lastTransitionTime":"2025-11-26T05:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.235125 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.235175 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.235187 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.235203 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.235213 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:47Z","lastTransitionTime":"2025-11-26T05:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.337422 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.337469 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.337484 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.337505 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.337520 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:47Z","lastTransitionTime":"2025-11-26T05:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.439308 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.439345 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.439358 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.439370 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.439380 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:47Z","lastTransitionTime":"2025-11-26T05:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.507284 4871 scope.go:117] "RemoveContainer" containerID="e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f"
Nov 26 05:26:47 crc kubenswrapper[4871]: E1126 05:26:47.507430 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.519118 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.541454 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.541491 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.541501 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.541518 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.541564 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:47Z","lastTransitionTime":"2025-11-26T05:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.644151 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.644356 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.644426 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.644518 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.644601 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:47Z","lastTransitionTime":"2025-11-26T05:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.746838 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.746881 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.746890 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.746905 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.746914 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:47Z","lastTransitionTime":"2025-11-26T05:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.848857 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.849110 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.849261 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.849397 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.849503 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:47Z","lastTransitionTime":"2025-11-26T05:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.952588 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.952655 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.952678 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.952706 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:47 crc kubenswrapper[4871]: I1126 05:26:47.952724 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:47Z","lastTransitionTime":"2025-11-26T05:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
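The "back-off 20s restarting failed container" entry above is CrashLoopBackOff's exponential restart delay for ovnkube-controller. Assuming the kubelet's usual defaults (a 10 s initial delay that doubles per crash and caps at 5 minutes; an assumption about defaults, not something read from this log), the series would look like:

```go
// Sketch of the restart backoff implied by "back-off 20s restarting failed
// container". The 10s initial delay and 5m cap are assumed kubelet defaults.
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initial = 10 * time.Second
		max     = 5 * time.Minute
	)
	delay := initial
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("crash #%d -> back-off %s\n", attempt, delay)
		delay *= 2 // double after each crash
		if delay > max {
			delay = max // cap the delay
		}
	}
	// crash #2 prints "back-off 20s", matching the ovnkube-controller entry above.
}
```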
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.055806 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.055859 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.055876 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.055899 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.055971 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:48Z","lastTransitionTime":"2025-11-26T05:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.158264 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.158338 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.158359 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.158385 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.158402 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:48Z","lastTransitionTime":"2025-11-26T05:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.261227 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.261268 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.261278 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.261293 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.261301 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:48Z","lastTransitionTime":"2025-11-26T05:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.364589 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.364652 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.364671 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.364695 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.364712 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:48Z","lastTransitionTime":"2025-11-26T05:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.466519 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.466966 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.467030 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.467105 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.467173 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:48Z","lastTransitionTime":"2025-11-26T05:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.506830 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.506865 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:26:48 crc kubenswrapper[4871]: E1126 05:26:48.506932 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.506952 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.507109 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:26:48 crc kubenswrapper[4871]: E1126 05:26:48.507101 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:26:48 crc kubenswrapper[4871]: E1126 05:26:48.507286 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:26:48 crc kubenswrapper[4871]: E1126 05:26:48.507495 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.570424 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.570496 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.570514 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.570569 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.570594 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:48Z","lastTransitionTime":"2025-11-26T05:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.672383 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.672416 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.672427 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.672460 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.672473 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:48Z","lastTransitionTime":"2025-11-26T05:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.775403 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.775447 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.775467 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.775494 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.775515 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:48Z","lastTransitionTime":"2025-11-26T05:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.783837 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:26:48 crc kubenswrapper[4871]: E1126 05:26:48.784057 4871 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 05:26:48 crc kubenswrapper[4871]: E1126 05:26:48.784122 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs podName:30b3c82b-ca2a-4821-86e0-94aa2afce847 nodeName:}" failed. No retries permitted until 2025-11-26 05:27:20.784101954 +0000 UTC m=+98.967153570 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs") pod "network-metrics-daemon-z2d5h" (UID: "30b3c82b-ca2a-4821-86e0-94aa2afce847") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.878153 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.878179 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.878187 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.878199 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.878207 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:48Z","lastTransitionTime":"2025-11-26T05:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.981062 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.981086 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.981096 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.981107 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:48 crc kubenswrapper[4871]: I1126 05:26:48.981115 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:48Z","lastTransitionTime":"2025-11-26T05:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
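The MountVolume failure above is a cache miss rather than proof the object is gone: secret.go reports "not registered" because the kubelet's secret manager has no entry yet for openshift-multus/metrics-daemon-secret. A hedged client-go sketch that checks only whether the secret exists on the API server side (it does not reproduce the kubelet's internal cache):

```go
// Hedged sketch: verify from the API server whether the secret the
// MountVolume step needs actually exists. Existence check only; the
// kubelet's "not registered" error concerns its own object cache.
package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	_, err = client.CoreV1().Secrets("openshift-multus").
		Get(context.TODO(), "metrics-daemon-secret", metav1.GetOptions{})
	switch {
	case apierrors.IsNotFound(err):
		fmt.Println("secret openshift-multus/metrics-daemon-secret does not exist")
	case err != nil:
		fmt.Println("lookup failed:", err)
	default:
		fmt.Println("secret exists; the kubelet cache should register it once pods sync")
	}
}
```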
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.083222 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.083269 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.083285 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.083307 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.083323 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.185682 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.185717 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.185727 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.185741 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.185751 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.288354 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.288639 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.288710 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.288772 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.288834 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.391214 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.391264 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.391274 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.391291 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.391304 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.494906 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.494964 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.494982 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.495008 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.495026 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.597894 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.597951 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.597970 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.597994 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.598012 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.706446 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.706487 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.706498 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.706513 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.706543 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.707670 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.707698 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.707731 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.707745 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.707763 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:49 crc kubenswrapper[4871]: E1126 05:26:49.724785 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:49Z is after 2025-08-24T17:21:41Z"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.728518 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.728582 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.728596 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.728610 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.728621 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:26:49 crc kubenswrapper[4871]: E1126 05:26:49.741493 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:49Z is after 
2025-08-24T17:21:41Z" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.744785 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.744813 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.744866 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.744885 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.744896 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:49 crc kubenswrapper[4871]: E1126 05:26:49.756262 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:49Z is after 
2025-08-24T17:21:41Z" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.760451 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.760505 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.760518 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.760552 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.760567 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:49 crc kubenswrapper[4871]: E1126 05:26:49.774789 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:49Z is after 
2025-08-24T17:21:41Z" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.778625 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.778657 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.778668 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.778685 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.778697 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:49 crc kubenswrapper[4871]: E1126 05:26:49.788981 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:49Z is after 
2025-08-24T17:21:41Z" Nov 26 05:26:49 crc kubenswrapper[4871]: E1126 05:26:49.789139 4871 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.808822 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.808851 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.808861 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.808873 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.808883 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.911037 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.911068 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.911078 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.911089 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:49 crc kubenswrapper[4871]: I1126 05:26:49.911098 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:49Z","lastTransitionTime":"2025-11-26T05:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.012957 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.012999 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.013011 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.013026 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.013036 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:50Z","lastTransitionTime":"2025-11-26T05:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.114983 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.115010 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.115020 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.115030 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.115039 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:50Z","lastTransitionTime":"2025-11-26T05:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.217197 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.217232 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.217241 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.217252 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.217261 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:50Z","lastTransitionTime":"2025-11-26T05:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.319589 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.319653 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.319670 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.319691 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.319709 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:50Z","lastTransitionTime":"2025-11-26T05:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.421363 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.421393 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.421401 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.421412 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.421439 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:50Z","lastTransitionTime":"2025-11-26T05:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.507151 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.507196 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.507201 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.507161 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:50 crc kubenswrapper[4871]: E1126 05:26:50.507355 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:50 crc kubenswrapper[4871]: E1126 05:26:50.507436 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:50 crc kubenswrapper[4871]: E1126 05:26:50.507512 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:50 crc kubenswrapper[4871]: E1126 05:26:50.507611 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.523044 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.523075 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.523084 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.523098 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.523106 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:50Z","lastTransitionTime":"2025-11-26T05:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.625156 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.625181 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.625190 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.625203 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.625212 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:50Z","lastTransitionTime":"2025-11-26T05:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.728390 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.728437 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.728453 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.728478 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.728495 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:50Z","lastTransitionTime":"2025-11-26T05:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.835068 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.835156 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.835186 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.835214 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.835234 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:50Z","lastTransitionTime":"2025-11-26T05:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.938072 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.938121 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.938133 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.938150 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:50 crc kubenswrapper[4871]: I1126 05:26:50.938160 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:50Z","lastTransitionTime":"2025-11-26T05:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.010076 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rpr6z_84290973-bc95-4326-bacd-7c210346620a/kube-multus/0.log" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.010156 4871 generic.go:334] "Generic (PLEG): container finished" podID="84290973-bc95-4326-bacd-7c210346620a" containerID="dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce" exitCode=1 Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.010200 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rpr6z" event={"ID":"84290973-bc95-4326-bacd-7c210346620a","Type":"ContainerDied","Data":"dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce"} Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.010868 4871 scope.go:117] "RemoveContainer" containerID="dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.028601 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.041490 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.041547 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.041558 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.041574 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.041584 4871 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:51Z","lastTransitionTime":"2025-11-26T05:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.047500 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.067740 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:31Z\\\",\\\"message\\\":\\\"er during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z]\\\\nI1126 05:26:31.460634 6510 services_controller.go:453] Built service openshift-apiserver/check-endpoints template LB for network=default: []services.LB{}\\\\nI1126 05:26:31.460771 6510 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver/apiserver_TCP_cluster\\\\\\\", UUID:\\\\\\\"d71b38eb-32af-4c0f-9490-7c317c111e3a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver/apiserver\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]st\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.081763 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.095268 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3606989b-2f43-46e5-a90d-7fcfa83a970d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c147cae302ede8de5204573d8405e9aee2503d957606138e742af17dfd03f6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Runni
ng\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.112599 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.123837 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.132412 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.141670 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.148060 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.148091 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.148099 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.148121 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.148130 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:51Z","lastTransitionTime":"2025-11-26T05:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns 
error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.157722 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0
009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.169662 4871 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.190102 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"012685e4-7f48-4dc5-8c32-b4acd0ba0788\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e936d7790749be736341822bb370fc8729d1e006bffe538ff480a090b856cce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1e537a2837f366cb6a6343ffdcf998611f07d8c19f4fe9c0111862520ebbe5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f55fc830fdd852727a8ac6714209b06ef8394a19d313752c316fd0901a47f2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\
"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.207221 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.226284 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.250385 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.250440 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.250454 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.250475 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.250489 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:51Z","lastTransitionTime":"2025-11-26T05:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.263714 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.278749 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.290374 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:51Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:50Z\\\",\\\"message\\\":\\\"2025-11-26T05:26:05+00:00 [cnibincopy] Successfully copied files in 
/usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5\\\\n2025-11-26T05:26:05+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5 to /host/opt/cni/bin/\\\\n2025-11-26T05:26:05Z [verbose] multus-daemon started\\\\n2025-11-26T05:26:05Z [verbose] Readiness Indicator file check\\\\n2025-11-26T05:26:50Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.301974 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:51Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.353068 4871 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.353107 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.353117 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.353131 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.353140 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:51Z","lastTransitionTime":"2025-11-26T05:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.456426 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.456468 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.456477 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.456492 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.456502 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:51Z","lastTransitionTime":"2025-11-26T05:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.559134 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.559174 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.559184 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.559198 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.559209 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:51Z","lastTransitionTime":"2025-11-26T05:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.661160 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.661199 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.661208 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.661222 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.661233 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:51Z","lastTransitionTime":"2025-11-26T05:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.763830 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.763889 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.763898 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.763915 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.763925 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:51Z","lastTransitionTime":"2025-11-26T05:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.866673 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.866767 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.866789 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.866813 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.866832 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:51Z","lastTransitionTime":"2025-11-26T05:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.969179 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.969244 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.969263 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.969287 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:51 crc kubenswrapper[4871]: I1126 05:26:51.969305 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:51Z","lastTransitionTime":"2025-11-26T05:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.015248 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rpr6z_84290973-bc95-4326-bacd-7c210346620a/kube-multus/0.log" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.015317 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rpr6z" event={"ID":"84290973-bc95-4326-bacd-7c210346620a","Type":"ContainerStarted","Data":"2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6"} Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.031305 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.042522 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 
05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.053733 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"012685e4-7f48-4dc5-8c32-b4acd0ba0788\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e936d7790749be736341822bb370fc8729d1e006bffe538ff480a090b856cce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1e537a2837f366cb6a6343ffdcf998611f07d8c19f4fe9c0111862520ebbe5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f55fc830fdd852727a8ac6714209b06ef8394a19d313752c316fd0901a47f2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.067313 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.075675 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.075720 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.075730 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.075744 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.075755 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:52Z","lastTransitionTime":"2025-11-26T05:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.084246 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.095373 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.104345 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.122807 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.138601 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.151843 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.162243 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.173899 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:50Z\\\",\\\"message\\\":\\\"2025-11-26T05:26:05+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5\\\\n2025-11-26T05:26:05+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5 to /host/opt/cni/bin/\\\\n2025-11-26T05:26:05Z [verbose] multus-daemon started\\\\n2025-11-26T05:26:05Z [verbose] Readiness Indicator file check\\\\n2025-11-26T05:26:50Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.178029 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.178076 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.178094 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.178117 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.178133 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:52Z","lastTransitionTime":"2025-11-26T05:26:52Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.183656 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.194635 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3606989b-2f43-46e5-a90d-7fcfa83a970d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c147cae302ede8de5204573d8405e9aee2503d957606138e742af17dfd03f6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.206013 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\
":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.218195 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.238627 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7
e1312016ca66c6ab69b4b08f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:31Z\\\",\\\"message\\\":\\\"er during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z]\\\\nI1126 05:26:31.460634 6510 services_controller.go:453] Built service openshift-apiserver/check-endpoints template LB for network=default: []services.LB{}\\\\nI1126 05:26:31.460771 6510 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver/apiserver_TCP_cluster\\\\\\\", UUID:\\\\\\\"d71b38eb-32af-4c0f-9490-7c317c111e3a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver/apiserver\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]st\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.248280 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.280458 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.280512 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.280543 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.280564 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.280578 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:52Z","lastTransitionTime":"2025-11-26T05:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.382549 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.382585 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.382597 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.382614 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.382624 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:52Z","lastTransitionTime":"2025-11-26T05:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.484782 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.484815 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.484826 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.484841 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.484852 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:52Z","lastTransitionTime":"2025-11-26T05:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.506344 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.506431 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:52 crc kubenswrapper[4871]: E1126 05:26:52.506651 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.506690 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.506694 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:52 crc kubenswrapper[4871]: E1126 05:26:52.506775 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:52 crc kubenswrapper[4871]: E1126 05:26:52.506959 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:52 crc kubenswrapper[4871]: E1126 05:26:52.507042 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.517923 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.532239 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.544393 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.556181 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.566385 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:50Z\\\",\\\"message\\\":\\\"2025-11-26T05:26:05+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5\\\\n2025-11-26T05:26:05+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5 to /host/opt/cni/bin/\\\\n2025-11-26T05:26:05Z [verbose] multus-daemon started\\\\n2025-11-26T05:26:05Z [verbose] Readiness Indicator file check\\\\n2025-11-26T05:26:50Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.576599 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.587368 4871 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.587409 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.587421 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.587439 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.587450 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:52Z","lastTransitionTime":"2025-11-26T05:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.595249 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7
e1312016ca66c6ab69b4b08f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:31Z\\\",\\\"message\\\":\\\"er during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z]\\\\nI1126 05:26:31.460634 6510 services_controller.go:453] Built service openshift-apiserver/check-endpoints template LB for network=default: []services.LB{}\\\\nI1126 05:26:31.460771 6510 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver/apiserver_TCP_cluster\\\\\\\", UUID:\\\\\\\"d71b38eb-32af-4c0f-9490-7c317c111e3a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver/apiserver\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]st\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.606069 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.616869 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3606989b-2f43-46e5-a90d-7fcfa83a970d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c147cae302ede8de5204573d8405e9aee2503d957606138e742af17dfd03f6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Runni
ng\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.629161 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\
\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.640326 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.654723 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.665848 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.681004 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.690460 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.690519 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:52 crc 
kubenswrapper[4871]: I1126 05:26:52.690575 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.690604 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.690627 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:52Z","lastTransitionTime":"2025-11-26T05:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.695250 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:2
6:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.708634 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"012685e4-7f48-4dc5-8c32-b4acd0ba0788\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e936d7790749be736341822bb370fc8729d1e006bffe538ff480a090b856cce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1e537a2837f366cb6a6343ffdcf998611f07d8c19f4fe9c0111862520ebbe5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f55fc830fdd852727a8ac6714209b06ef8394a19d313752c316fd0901a47f2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.722644 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.736691 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:52Z is after 2025-08-24T17:21:41Z" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.792053 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.792085 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.792095 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.792110 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.792120 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:52Z","lastTransitionTime":"2025-11-26T05:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.893780 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.893817 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.893828 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.893844 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.893855 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:52Z","lastTransitionTime":"2025-11-26T05:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.995583 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.995667 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.995691 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.995720 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:52 crc kubenswrapper[4871]: I1126 05:26:52.995737 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:52Z","lastTransitionTime":"2025-11-26T05:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.097957 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.098001 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.098013 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.098030 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.098042 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:53Z","lastTransitionTime":"2025-11-26T05:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.200196 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.200237 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.200250 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.200266 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.200276 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:53Z","lastTransitionTime":"2025-11-26T05:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.303781 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.303848 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.303871 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.303900 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.303925 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:53Z","lastTransitionTime":"2025-11-26T05:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.406796 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.406898 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.406922 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.406951 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.406971 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:53Z","lastTransitionTime":"2025-11-26T05:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.508795 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.508834 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.508848 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.508860 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.508870 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:53Z","lastTransitionTime":"2025-11-26T05:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.612237 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.612297 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.612335 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.612359 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.612378 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:53Z","lastTransitionTime":"2025-11-26T05:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.714341 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.714402 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.714412 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.714425 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.714433 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:53Z","lastTransitionTime":"2025-11-26T05:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.816832 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.816881 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.816899 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.816921 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.816937 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:53Z","lastTransitionTime":"2025-11-26T05:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.919988 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.920027 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.920045 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.920065 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:53 crc kubenswrapper[4871]: I1126 05:26:53.920083 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:53Z","lastTransitionTime":"2025-11-26T05:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.022181 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.022238 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.022256 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.022279 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.022296 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:54Z","lastTransitionTime":"2025-11-26T05:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.125556 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.125808 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.125901 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.125988 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.126081 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:54Z","lastTransitionTime":"2025-11-26T05:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.228855 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.228908 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.228925 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.228948 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.228963 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:54Z","lastTransitionTime":"2025-11-26T05:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.331703 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.331737 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.331749 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.331763 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.331774 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:54Z","lastTransitionTime":"2025-11-26T05:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.435045 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.435354 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.435480 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.435610 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.435733 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:54Z","lastTransitionTime":"2025-11-26T05:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.507075 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.507139 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:54 crc kubenswrapper[4871]: E1126 05:26:54.507473 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.507198 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:54 crc kubenswrapper[4871]: E1126 05:26:54.507632 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.507179 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:54 crc kubenswrapper[4871]: E1126 05:26:54.508344 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:54 crc kubenswrapper[4871]: E1126 05:26:54.508465 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.537498 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.537549 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.537560 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.537574 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.537584 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:54Z","lastTransitionTime":"2025-11-26T05:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.639650 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.639683 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.639697 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.639711 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.639722 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:54Z","lastTransitionTime":"2025-11-26T05:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.743297 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.743656 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.743808 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.743944 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.744085 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:54Z","lastTransitionTime":"2025-11-26T05:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.847409 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.847455 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.847475 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.847493 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.847505 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:54Z","lastTransitionTime":"2025-11-26T05:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.951042 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.951082 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.951091 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.951106 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:54 crc kubenswrapper[4871]: I1126 05:26:54.951116 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:54Z","lastTransitionTime":"2025-11-26T05:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.053269 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.053335 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.053351 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.053374 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.053391 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:55Z","lastTransitionTime":"2025-11-26T05:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.156071 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.156151 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.156175 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.156246 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.156270 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:55Z","lastTransitionTime":"2025-11-26T05:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.259353 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.259398 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.259410 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.259437 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.259450 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:55Z","lastTransitionTime":"2025-11-26T05:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.361773 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.361805 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.361813 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.361825 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.361834 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:55Z","lastTransitionTime":"2025-11-26T05:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.465235 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.465291 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.465308 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.465332 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.465350 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:55Z","lastTransitionTime":"2025-11-26T05:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.567766 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.567813 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.567830 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.567853 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.567872 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:55Z","lastTransitionTime":"2025-11-26T05:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.671284 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.671337 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.671354 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.671379 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.671400 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:55Z","lastTransitionTime":"2025-11-26T05:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.774650 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.774716 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.774733 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.774757 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.774774 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:55Z","lastTransitionTime":"2025-11-26T05:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.877191 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.877224 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.877232 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.877244 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.877252 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:55Z","lastTransitionTime":"2025-11-26T05:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.979959 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.980026 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.980049 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.980077 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:55 crc kubenswrapper[4871]: I1126 05:26:55.980116 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:55Z","lastTransitionTime":"2025-11-26T05:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.082884 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.082942 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.082959 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.082981 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.082996 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:56Z","lastTransitionTime":"2025-11-26T05:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.186300 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.186373 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.186397 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.186425 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.186447 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:56Z","lastTransitionTime":"2025-11-26T05:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.289380 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.289432 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.289445 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.289465 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.289478 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:56Z","lastTransitionTime":"2025-11-26T05:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.392675 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.392703 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.392712 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.392725 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.392734 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:56Z","lastTransitionTime":"2025-11-26T05:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.495522 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.495607 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.495627 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.495650 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.495668 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:56Z","lastTransitionTime":"2025-11-26T05:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.506829 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:56 crc kubenswrapper[4871]: E1126 05:26:56.506991 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.507453 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:56 crc kubenswrapper[4871]: E1126 05:26:56.507606 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.507842 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:56 crc kubenswrapper[4871]: E1126 05:26:56.507963 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.506771 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:56 crc kubenswrapper[4871]: E1126 05:26:56.512891 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.599749 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.599814 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.599840 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.599869 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.599897 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:56Z","lastTransitionTime":"2025-11-26T05:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.703820 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.703867 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.703884 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.703907 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.703924 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:56Z","lastTransitionTime":"2025-11-26T05:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.806856 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.806901 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.806917 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.806938 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.806956 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:56Z","lastTransitionTime":"2025-11-26T05:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.910289 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.910346 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.910364 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.910387 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:56 crc kubenswrapper[4871]: I1126 05:26:56.910404 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:56Z","lastTransitionTime":"2025-11-26T05:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.013870 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.013926 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.013942 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.013966 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.013985 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:57Z","lastTransitionTime":"2025-11-26T05:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.116937 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.117007 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.117030 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.117053 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.117070 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:57Z","lastTransitionTime":"2025-11-26T05:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.220037 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.220120 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.220139 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.220166 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.220188 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:57Z","lastTransitionTime":"2025-11-26T05:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.323459 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.323563 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.323582 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.323609 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.323627 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:57Z","lastTransitionTime":"2025-11-26T05:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.427166 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.427260 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.427285 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.427315 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.427338 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:57Z","lastTransitionTime":"2025-11-26T05:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.530482 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.530597 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.530616 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.530640 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.530657 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:57Z","lastTransitionTime":"2025-11-26T05:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.633671 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.634065 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.634249 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.634449 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.634685 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:57Z","lastTransitionTime":"2025-11-26T05:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.738088 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.738762 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.738805 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.738833 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.738850 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:57Z","lastTransitionTime":"2025-11-26T05:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.841421 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.841489 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.841506 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.841555 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.841622 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:57Z","lastTransitionTime":"2025-11-26T05:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.944403 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.944706 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.944819 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.944911 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:57 crc kubenswrapper[4871]: I1126 05:26:57.944995 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:57Z","lastTransitionTime":"2025-11-26T05:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.048486 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.048572 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.048592 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.048618 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.048636 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:58Z","lastTransitionTime":"2025-11-26T05:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.151427 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.151489 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.151507 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.151575 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.151599 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:58Z","lastTransitionTime":"2025-11-26T05:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.255956 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.256035 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.256058 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.256088 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.256110 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:58Z","lastTransitionTime":"2025-11-26T05:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.360381 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.360443 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.360464 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.360492 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.360514 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:58Z","lastTransitionTime":"2025-11-26T05:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.463036 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.463459 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.463667 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.463817 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.463993 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:58Z","lastTransitionTime":"2025-11-26T05:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.506686 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.506732 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.506757 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:26:58 crc kubenswrapper[4871]: E1126 05:26:58.507496 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:26:58 crc kubenswrapper[4871]: E1126 05:26:58.507263 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.506809 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:26:58 crc kubenswrapper[4871]: E1126 05:26:58.507777 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:26:58 crc kubenswrapper[4871]: E1126 05:26:58.507817 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.567393 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.567447 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.567463 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.567489 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.567509 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:58Z","lastTransitionTime":"2025-11-26T05:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.670621 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.670659 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.670675 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.670697 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.670712 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:58Z","lastTransitionTime":"2025-11-26T05:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.774263 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.775470 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.775671 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.775821 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.775995 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:58Z","lastTransitionTime":"2025-11-26T05:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.879855 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.879944 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.879972 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.880005 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.880031 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:58Z","lastTransitionTime":"2025-11-26T05:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.983978 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.984045 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.984063 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.984088 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:58 crc kubenswrapper[4871]: I1126 05:26:58.984104 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:58Z","lastTransitionTime":"2025-11-26T05:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.087676 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.087751 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.087776 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.087805 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.087823 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:59Z","lastTransitionTime":"2025-11-26T05:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.190381 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.190658 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.190755 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.190838 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.190924 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:59Z","lastTransitionTime":"2025-11-26T05:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.294460 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.294507 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.294520 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.294562 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.294576 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:59Z","lastTransitionTime":"2025-11-26T05:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.398361 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.398419 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.398436 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.398460 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.398476 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:59Z","lastTransitionTime":"2025-11-26T05:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.500245 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.500278 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.500289 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.500304 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.500318 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:59Z","lastTransitionTime":"2025-11-26T05:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.602837 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.602912 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.602952 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.602988 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.603011 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:59Z","lastTransitionTime":"2025-11-26T05:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.705558 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.705627 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.705653 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.705683 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.705705 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:59Z","lastTransitionTime":"2025-11-26T05:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.808649 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.809477 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.809669 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.809819 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.809959 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:59Z","lastTransitionTime":"2025-11-26T05:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.912951 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.913010 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.913029 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.913053 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:26:59 crc kubenswrapper[4871]: I1126 05:26:59.913070 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:26:59Z","lastTransitionTime":"2025-11-26T05:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.016220 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.016680 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.016924 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.017254 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.017746 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.120698 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.120733 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.120744 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.120756 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.120765 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.135212 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.135267 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.135284 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.135308 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.135326 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: E1126 05:27:00.152751 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.156664 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.156779 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.156889 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.156968 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.157042 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: E1126 05:27:00.178327 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.182924 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.183146 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.183173 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.183493 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.183512 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: E1126 05:27:00.200957 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.206085 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.206141 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.206160 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.206184 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.206201 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: E1126 05:27:00.224391 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.229159 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.229201 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.229218 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.229282 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.229301 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: E1126 05:27:00.248029 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:00 crc kubenswrapper[4871]: E1126 05:27:00.248267 4871 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.250181 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.250236 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.250255 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.250279 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.250304 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.352279 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.352363 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.352388 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.352423 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.352448 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.455613 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.455651 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.455663 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.455679 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.455690 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.506917 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.506967 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:00 crc kubenswrapper[4871]: E1126 05:27:00.507146 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.506942 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.507239 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:00 crc kubenswrapper[4871]: E1126 05:27:00.507346 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:00 crc kubenswrapper[4871]: E1126 05:27:00.507508 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:00 crc kubenswrapper[4871]: E1126 05:27:00.507569 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.558072 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.558141 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.558163 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.558246 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.558317 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.661557 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.661628 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.661647 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.661721 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.661749 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.764850 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.764914 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.764934 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.764960 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.764978 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.868237 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.868293 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.868303 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.868318 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.868327 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.971219 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.971268 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.971280 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.971298 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:00 crc kubenswrapper[4871]: I1126 05:27:00.971313 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:00Z","lastTransitionTime":"2025-11-26T05:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.073961 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.074011 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.074022 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.074039 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.074049 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:01Z","lastTransitionTime":"2025-11-26T05:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.176698 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.176774 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.176792 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.176816 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.176832 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:01Z","lastTransitionTime":"2025-11-26T05:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.279805 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.279869 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.279889 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.279913 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.279929 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:01Z","lastTransitionTime":"2025-11-26T05:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.383434 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.383483 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.383495 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.383512 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.383545 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:01Z","lastTransitionTime":"2025-11-26T05:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.486615 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.486923 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.487140 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.487282 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.487414 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:01Z","lastTransitionTime":"2025-11-26T05:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.508408 4871 scope.go:117] "RemoveContainer" containerID="e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.591617 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.591666 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.591678 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.591696 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.591708 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:01Z","lastTransitionTime":"2025-11-26T05:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.695193 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.695388 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.695420 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.695451 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.695476 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:01Z","lastTransitionTime":"2025-11-26T05:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.798722 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.798778 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.798793 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.798815 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.798831 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:01Z","lastTransitionTime":"2025-11-26T05:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.902224 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.902286 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.902308 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.902336 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:01 crc kubenswrapper[4871]: I1126 05:27:01.902359 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:01Z","lastTransitionTime":"2025-11-26T05:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.004428 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.004470 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.004485 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.004501 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.004512 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:02Z","lastTransitionTime":"2025-11-26T05:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.051933 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/2.log" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.054377 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef"} Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.054860 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.069807 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225
b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.082105 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.094115 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.106872 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.106944 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.106957 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.106974 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.106985 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:02Z","lastTransitionTime":"2025-11-26T05:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.108699 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.120036 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:50Z\\\",\\\"message\\\":\\\"2025-11-26T05:26:05+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5\\\\n2025-11-26T05:26:05+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5 to /host/opt/cni/bin/\\\\n2025-11-26T05:26:05Z [verbose] multus-daemon started\\\\n2025-11-26T05:26:05Z [verbose] Readiness Indicator file check\\\\n2025-11-26T05:26:50Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.132266 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.141409 4871 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3606989b-2f43-46e5-a90d-7fcfa83a970d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c147cae302ede8de5204573d8405e9aee2503d957606138e742af17dfd03f6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.154173 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.164867 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.184224 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://165ef93f065973ad6c896ad290235fa4a66d891d
5829d3e2d689c49f7e8951ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:31Z\\\",\\\"message\\\":\\\"er during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z]\\\\nI1126 05:26:31.460634 6510 services_controller.go:453] Built service openshift-apiserver/check-endpoints template LB for network=default: []services.LB{}\\\\nI1126 05:26:31.460771 6510 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver/apiserver_TCP_cluster\\\\\\\", UUID:\\\\\\\"d71b38eb-32af-4c0f-9490-7c317c111e3a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver/apiserver\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, 
Routers:[]st\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\"
:[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.194882 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.205185 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 
05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.209343 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.209375 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.209386 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.209401 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.209411 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:02Z","lastTransitionTime":"2025-11-26T05:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.216849 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"012685e4-7f48-4dc5-8c32-b4acd0ba0788\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e936d7790749be736341822bb370fc8729d1e006bffe538ff480a090b856cce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1e537a2837f366cb6a6343ffdcf998611f07d8c19f4fe9c0111862520ebbe5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"
restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f55fc830fdd852727a8ac6714209b06ef8394a19d313752c316fd0901a47f2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.229199 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.242019 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.261934 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.277023 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.289677 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.311762 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.311794 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:02 crc 
kubenswrapper[4871]: I1126 05:27:02.311802 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.311814 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.311822 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:02Z","lastTransitionTime":"2025-11-26T05:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.414473 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.414539 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.414557 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.414581 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.414596 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:02Z","lastTransitionTime":"2025-11-26T05:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.506957 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:02 crc kubenswrapper[4871]: E1126 05:27:02.507171 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.507352 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:02 crc kubenswrapper[4871]: E1126 05:27:02.507596 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.507627 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:02 crc kubenswrapper[4871]: E1126 05:27:02.507850 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.507769 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:02 crc kubenswrapper[4871]: E1126 05:27:02.508062 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.517367 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.517421 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.517437 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.517459 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.517476 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:02Z","lastTransitionTime":"2025-11-26T05:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.524154 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:50Z\\\",\\\"message\\\":\\\"2025-11-26T05:26:05+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5\\\\n2025-11-26T05:26:05+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5 to /host/opt/cni/bin/\\\\n2025-11-26T05:26:05Z [verbose] multus-daemon started\\\\n2025-11-26T05:26:05Z [verbose] Readiness Indicator file check\\\\n2025-11-26T05:26:50Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.537862 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.554648 4871 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56
025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.576749 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.600083 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://165ef93f065973ad6c896ad290235fa4a66d891d
5829d3e2d689c49f7e8951ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:31Z\\\",\\\"message\\\":\\\"er during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z]\\\\nI1126 05:26:31.460634 6510 services_controller.go:453] Built service openshift-apiserver/check-endpoints template LB for network=default: []services.LB{}\\\\nI1126 05:26:31.460771 6510 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver/apiserver_TCP_cluster\\\\\\\", UUID:\\\\\\\"d71b38eb-32af-4c0f-9490-7c317c111e3a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver/apiserver\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, 
Routers:[]st\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\"
:[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.617932 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.619899 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.619950 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.619970 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.619993 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.620010 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:02Z","lastTransitionTime":"2025-11-26T05:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.634720 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3606989b-2f43-46e5-a90d-7fcfa83a970d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c147cae302ede8de5204573d8405e9aee2503d957606138e742af17dfd03f6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.652748 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.672388 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.697256 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z"
Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.713038 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z"
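Every status_manager patch in this stretch fails identically: the API server cannot call the pod.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743 because the webhook's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-26T05:27:02Z. The error text is what Go's crypto/x509 verifier emits when the verification time falls outside a certificate's validity window; the quoted payloads are the strategic-merge patches ($setElementOrder/conditions and the containerStatuses lists) that the kubelet keeps retrying. A minimal sketch of the same validity check, in Go, assuming a PEM-encoded certificate file; the path /tmp/webhook-serving.crt is illustrative, not taken from this log:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	// Hypothetical path, for illustration only: point this at the
	// webhook's PEM-encoded serving certificate.
	data, err := os.ReadFile("/tmp/webhook-serving.crt")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// crypto/x509 rejects a chain whenever the verification time falls
	// outside [NotBefore, NotAfter]; that is the check failing above.
	now := time.Now().UTC()
	switch {
	case now.After(cert.NotAfter):
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	default:
		fmt.Printf("valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
	}
}

Until that certificate is rotated, every TLS dial to 127.0.0.1:9743 fails the same way, so each patch attempt below produces the same rejection.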
Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.723136 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.723198 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.723219 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.723246 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.723264 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:02Z","lastTransitionTime":"2025-11-26T05:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
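The NodeNotReady transition recorded above has a second, related cause visible in the condition message: the container runtime reports NetworkReady=false because no CNI network config has been written yet, and the ovnkube-controller container that would write it is itself crash-looping on the same expired webhook certificate (see its termination message earlier in this window). A rough sketch of the readiness scan involved, assuming only that the runtime looks for CNI config files (*.conf, *.conflist, *.json) in the directory named in the log line; this simplifies what CRI-O/libcni actually do, and the helper name hasCNIConfig is mine:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig loosely mimics the libcni conf-file scan: the node's network
// is treated as configured once at least one CNI config file exists.
func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		// The state kubelet surfaces above as reason:KubeletNotReady.
		fmt.Println("network plugin not ready: no CNI configuration file in /etc/kubernetes/cni/net.d/")
		return
	}
	fmt.Println("NetworkReady=true")
}

Once ovnkube-controller stays up long enough to drop its config into that directory, the runtime would be expected to flip NetworkReady and the node's Ready condition to clear without a kubelet restart.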
th\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5e
d81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"sta
rtTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.753935 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\
\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.770790 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"012685e4-7f48-4dc5-8c32-b4acd0ba0788\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e936d7790749be736341822bb370fc8729d1e006bffe538ff480a090b856cce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1e537a2837f366cb6a6343ffdcf998611f07d8c19f4fe9c0111862520ebbe5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f55fc830fdd852727a8ac6714209b06ef8394a19d313752c316fd0901a47f2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001
edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.791440 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.812455 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.826319 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.826392 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.826411 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.826440 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.826463 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:02Z","lastTransitionTime":"2025-11-26T05:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.830982 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.850284 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:02Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.929200 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.930097 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.930231 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.930413 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:02 crc kubenswrapper[4871]: I1126 05:27:02.930603 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:02Z","lastTransitionTime":"2025-11-26T05:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.033823 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.033906 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.033924 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.033977 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.033991 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:03Z","lastTransitionTime":"2025-11-26T05:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.060276 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/3.log" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.061231 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/2.log" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.065279 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef" exitCode=1 Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.065330 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef"} Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.065402 4871 scope.go:117] "RemoveContainer" containerID="e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.072598 4871 scope.go:117] "RemoveContainer" containerID="165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef" Nov 26 05:27:03 crc kubenswrapper[4871]: E1126 05:27:03.072780 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.086639 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"012685e4-7f48-4dc5-8c32-b4acd0ba0788\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e936d7790749be736341822bb370fc8729d1e006bffe538ff480a090b856cce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1e537a2837f366cb6a6343ffdcf998611f07d8c19f4fe9c0111862520ebbe5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f55fc830fdd852727a8ac6714209b06ef8394a19d313752c316fd0901a47f2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.103465 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.121672 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.136612 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.136665 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.136688 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.136722 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.136745 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:03Z","lastTransitionTime":"2025-11-26T05:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.139865 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.155518 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.175834 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.188713 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 
05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.201673 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.218267 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.234325 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.239369 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.239413 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.239449 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.239472 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.239484 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:03Z","lastTransitionTime":"2025-11-26T05:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.250361 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.265952 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:50Z\\\",\\\"message\\\":\\\"2025-11-26T05:26:05+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5\\\\n2025-11-26T05:26:05+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5 to /host/opt/cni/bin/\\\\n2025-11-26T05:26:05Z [verbose] multus-daemon started\\\\n2025-11-26T05:26:05Z [verbose] Readiness Indicator file check\\\\n2025-11-26T05:26:50Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.282478 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.296873 4871 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3606989b-2f43-46e5-a90d-7fcfa83a970d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c147cae302ede8de5204573d8405e9aee2503d957606138e742af17dfd03f6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.318432 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.335701 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.341616 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.341673 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.341690 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.341711 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.341728 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:03Z","lastTransitionTime":"2025-11-26T05:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.360961 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e92294b624af1c01b9d352a536bc1743edd757b7e1312016ca66c6ab69b4b08f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:31Z\\\",\\\"message\\\":\\\"er during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:26:31Z is after 2025-08-24T17:21:41Z]\\\\nI1126 05:26:31.460634 6510 services_controller.go:453] Built service openshift-apiserver/check-endpoints template LB for network=default: []services.LB{}\\\\nI1126 05:26:31.460771 6510 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-apiserver/apiserver_TCP_cluster\\\\\\\", UUID:\\\\\\\"d71b38eb-32af-4c0f-9490-7c317c111e3a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-apiserver/apiserver\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, 
Routers:[]st\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:27:02Z\\\",\\\"message\\\":\\\"ssip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 05:27:02.405821 6869 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 05:27:02.405841 6869 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 05:27:02.405880 6869 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 05:27:02.405877 6869 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 05:27:02.405910 6869 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 05:27:02.405896 6869 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 05:27:02.405948 6869 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 05:27:02.406002 6869 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 05:27:02.406001 6869 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 05:27:02.406024 6869 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 05:27:02.406037 6869 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 05:27:02.406044 6869 factory.go:656] Stopping watch factory\\\\nI1126 05:27:02.406057 6869 ovnkube.go:599] Stopped ovnkube\\\\nI1126 05:27:02.406103 6869 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 05:27:02.406123 6869 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 
05:27:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:27:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.377214 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:03Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.473565 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.473627 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.473644 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.473668 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.473685 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:03Z","lastTransitionTime":"2025-11-26T05:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.576773 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.576843 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.576862 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.576888 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.576905 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:03Z","lastTransitionTime":"2025-11-26T05:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.679853 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.680132 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.680142 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.680156 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.680164 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:03Z","lastTransitionTime":"2025-11-26T05:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.782298 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.782324 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.782332 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.782344 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.782353 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:03Z","lastTransitionTime":"2025-11-26T05:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.885768 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.885844 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.885869 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.885898 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.885920 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:03Z","lastTransitionTime":"2025-11-26T05:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.988599 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.988662 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.988675 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.988694 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:03 crc kubenswrapper[4871]: I1126 05:27:03.988707 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:03Z","lastTransitionTime":"2025-11-26T05:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.070702 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/3.log" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.081841 4871 scope.go:117] "RemoveContainer" containerID="165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef" Nov 26 05:27:04 crc kubenswrapper[4871]: E1126 05:27:04.082167 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.090713 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.090767 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.090820 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.090851 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.090874 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:04Z","lastTransitionTime":"2025-11-26T05:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.094978 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"012685e4-7f48-4dc5-8c32-b4acd0ba0788\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e936d7790749be736341822bb370fc8729d1e006bffe538ff480a090b856cce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1e537a2837f366cb6a6343ffdcf998611f07d8c19f4fe9c0111862520ebbe5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f55fc830fdd852727a8ac6714209b06ef8394a19d313752c316fd0901a47f2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.114133 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.133084 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.150348 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.164418 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.188515 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.193228 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.193276 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:04 crc 
kubenswrapper[4871]: I1126 05:27:04.193293 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.193315 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.193332 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:04Z","lastTransitionTime":"2025-11-26T05:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.205878 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:2
6:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.225436 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.247068 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.262574 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.278082 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.295966 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.296101 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.296124 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.296155 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.296173 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:04Z","lastTransitionTime":"2025-11-26T05:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.297570 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:50Z\\\",\\\"message\\\":\\\"2025-11-26T05:26:05+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5\\\\n2025-11-26T05:26:05+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5 to /host/opt/cni/bin/\\\\n2025-11-26T05:26:05Z [verbose] multus-daemon started\\\\n2025-11-26T05:26:05Z [verbose] Readiness Indicator file check\\\\n2025-11-26T05:26:50Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.311201 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.326235 4871 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3606989b-2f43-46e5-a90d-7fcfa83a970d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c147cae302ede8de5204573d8405e9aee2503d957606138e742af17dfd03f6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.341448 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.361784 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.392093 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://165ef93f065973ad6c896ad290235fa4a66d891d
5829d3e2d689c49f7e8951ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:27:02Z\\\",\\\"message\\\":\\\"ssip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 05:27:02.405821 6869 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 05:27:02.405841 6869 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 05:27:02.405880 6869 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 05:27:02.405877 6869 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 05:27:02.405910 6869 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 05:27:02.405896 6869 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 05:27:02.405948 6869 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 05:27:02.406002 6869 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 05:27:02.406001 6869 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 05:27:02.406024 6869 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 05:27:02.406037 6869 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 05:27:02.406044 6869 factory.go:656] Stopping watch factory\\\\nI1126 05:27:02.406057 6869 ovnkube.go:599] Stopped ovnkube\\\\nI1126 05:27:02.406103 6869 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 05:27:02.406123 6869 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 05:27:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:27:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.398746 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.398824 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.398848 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.398877 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.398896 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:04Z","lastTransitionTime":"2025-11-26T05:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.409702 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:04Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.508928 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:04 crc kubenswrapper[4871]: E1126 05:27:04.509044 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.509243 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:04 crc kubenswrapper[4871]: E1126 05:27:04.509309 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.509450 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:04 crc kubenswrapper[4871]: E1126 05:27:04.509516 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.509689 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:04 crc kubenswrapper[4871]: E1126 05:27:04.509749 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.510096 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.510115 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.510125 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.510139 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.510149 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:04Z","lastTransitionTime":"2025-11-26T05:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.613376 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.613434 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.613458 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.613488 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.613510 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:04Z","lastTransitionTime":"2025-11-26T05:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.716922 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.716993 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.717016 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.717046 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.717067 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:04Z","lastTransitionTime":"2025-11-26T05:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.820464 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.820574 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.820600 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.820628 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.820648 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:04Z","lastTransitionTime":"2025-11-26T05:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.923640 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.923693 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.923715 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.923742 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:04 crc kubenswrapper[4871]: I1126 05:27:04.923763 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:04Z","lastTransitionTime":"2025-11-26T05:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.027187 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.027251 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.027277 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.027305 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.027327 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:05Z","lastTransitionTime":"2025-11-26T05:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.130307 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.130375 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.130392 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.130415 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.130432 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:05Z","lastTransitionTime":"2025-11-26T05:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.234925 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.234982 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.234999 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.235023 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.235040 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:05Z","lastTransitionTime":"2025-11-26T05:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.338831 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.338884 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.338903 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.338933 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.338956 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:05Z","lastTransitionTime":"2025-11-26T05:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.443147 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.443206 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.443224 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.443251 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.443268 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:05Z","lastTransitionTime":"2025-11-26T05:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.546151 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.546213 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.546238 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.546267 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.546288 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:05Z","lastTransitionTime":"2025-11-26T05:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.649353 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.649411 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.649429 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.649456 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.649473 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:05Z","lastTransitionTime":"2025-11-26T05:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.752918 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.752961 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.752973 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.752995 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.753007 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:05Z","lastTransitionTime":"2025-11-26T05:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.856279 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.856354 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.856376 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.856405 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.856428 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:05Z","lastTransitionTime":"2025-11-26T05:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.959723 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.959799 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.959818 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.959842 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:05 crc kubenswrapper[4871]: I1126 05:27:05.959859 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:05Z","lastTransitionTime":"2025-11-26T05:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.063343 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.063401 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.063417 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.063441 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.063460 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:06Z","lastTransitionTime":"2025-11-26T05:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.167125 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.167197 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.167214 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.167237 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.167254 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:06Z","lastTransitionTime":"2025-11-26T05:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.270722 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.270880 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.270907 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.270935 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.270957 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:06Z","lastTransitionTime":"2025-11-26T05:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.274823 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.274940 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.275093 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.275132 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.275150 4871 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.275208 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.275187511 +0000 UTC m=+148.458239137 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.275442 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.275474 4871 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.275489 4871 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.275569 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.275517419 +0000 UTC m=+148.458569045 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.374518 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.374664 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.374685 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.374711 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.374729 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:06Z","lastTransitionTime":"2025-11-26T05:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.375583 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.375795 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.375767246 +0000 UTC m=+148.558818882 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.476334 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.476406 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.476598 4871 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.476624 4871 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.476669 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.476648789 +0000 UTC m=+148.659700415 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.476743 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.47671291 +0000 UTC m=+148.659764536 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.478157 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.478237 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.478263 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.478295 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.478319 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:06Z","lastTransitionTime":"2025-11-26T05:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.507777 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.507824 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.507878 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.508061 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.508149 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.508327 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.508438 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:27:06 crc kubenswrapper[4871]: E1126 05:27:06.508749 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.582219 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.582276 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.582300 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.582329 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.582350 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:06Z","lastTransitionTime":"2025-11-26T05:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.685759 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.685865 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.685893 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.685927 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.685948 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:06Z","lastTransitionTime":"2025-11-26T05:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.789042 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.789098 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.789115 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.789137 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.789153 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:06Z","lastTransitionTime":"2025-11-26T05:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.892651 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.892722 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.892745 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.892774 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.892794 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:06Z","lastTransitionTime":"2025-11-26T05:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.995706 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.995790 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.995817 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.995851 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:06 crc kubenswrapper[4871]: I1126 05:27:06.995872 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:06Z","lastTransitionTime":"2025-11-26T05:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.097802 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.097860 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.097878 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.097901 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.097918 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:07Z","lastTransitionTime":"2025-11-26T05:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.201210 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.201274 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.201293 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.201318 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.201335 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:07Z","lastTransitionTime":"2025-11-26T05:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.304977 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.305035 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.305059 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.305086 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.305106 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:07Z","lastTransitionTime":"2025-11-26T05:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.409005 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.409067 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.409086 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.409111 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.409127 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:07Z","lastTransitionTime":"2025-11-26T05:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.512707 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.512796 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.512857 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.512889 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.512973 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:07Z","lastTransitionTime":"2025-11-26T05:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.616624 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.616693 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.616715 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.616745 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.616764 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:07Z","lastTransitionTime":"2025-11-26T05:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.720091 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.720138 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.720151 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.720169 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.720181 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:07Z","lastTransitionTime":"2025-11-26T05:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.824202 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.824259 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.824278 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.824306 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.824325 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:07Z","lastTransitionTime":"2025-11-26T05:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.926999 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.927044 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.927059 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.927077 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:07 crc kubenswrapper[4871]: I1126 05:27:07.927089 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:07Z","lastTransitionTime":"2025-11-26T05:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.030498 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.030614 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.030639 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.030670 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.030694 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:08Z","lastTransitionTime":"2025-11-26T05:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.133825 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.133888 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.133909 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.133941 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.133965 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:08Z","lastTransitionTime":"2025-11-26T05:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.236996 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.237057 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.237078 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.237105 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.237127 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:08Z","lastTransitionTime":"2025-11-26T05:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.340475 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.340624 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.340655 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.340686 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.340704 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:08Z","lastTransitionTime":"2025-11-26T05:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.444244 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.444316 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.444341 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.444372 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.444393 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:08Z","lastTransitionTime":"2025-11-26T05:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.506799 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.506845 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:27:08 crc kubenswrapper[4871]: E1126 05:27:08.506982 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.507011 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.507020 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:27:08 crc kubenswrapper[4871]: E1126 05:27:08.507101 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:27:08 crc kubenswrapper[4871]: E1126 05:27:08.507219 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:27:08 crc kubenswrapper[4871]: E1126 05:27:08.507370 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.547154 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.547216 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.547233 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.547255 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.547274 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:08Z","lastTransitionTime":"2025-11-26T05:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.650618 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.650694 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.650715 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.650741 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.650758 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:08Z","lastTransitionTime":"2025-11-26T05:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.753876 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.753925 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.753942 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.753963 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.753977 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:08Z","lastTransitionTime":"2025-11-26T05:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.856402 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.856469 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.856491 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.856519 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.856598 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:08Z","lastTransitionTime":"2025-11-26T05:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.960442 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.960587 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.960615 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.960672 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:08 crc kubenswrapper[4871]: I1126 05:27:08.960701 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:08Z","lastTransitionTime":"2025-11-26T05:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.064234 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.064285 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.064301 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.064327 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.064364 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:09Z","lastTransitionTime":"2025-11-26T05:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.167797 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.167857 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.167875 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.167898 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.167917 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:09Z","lastTransitionTime":"2025-11-26T05:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.271414 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.271877 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.271902 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.271930 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.271950 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:09Z","lastTransitionTime":"2025-11-26T05:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.374952 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.375006 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.375022 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.375044 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.375060 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:09Z","lastTransitionTime":"2025-11-26T05:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.477418 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.477730 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.477756 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.477784 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.477806 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:09Z","lastTransitionTime":"2025-11-26T05:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.580594 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.580653 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.580667 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.580686 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.580698 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:09Z","lastTransitionTime":"2025-11-26T05:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.683851 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.683918 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.683935 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.683959 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.683977 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:09Z","lastTransitionTime":"2025-11-26T05:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.787435 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.787497 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.787521 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.787593 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.787611 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:09Z","lastTransitionTime":"2025-11-26T05:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.890646 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.890724 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.890751 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.890782 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.890804 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:09Z","lastTransitionTime":"2025-11-26T05:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.993722 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.993788 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.993812 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.993840 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:09 crc kubenswrapper[4871]: I1126 05:27:09.993860 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:09Z","lastTransitionTime":"2025-11-26T05:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.097299 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.097368 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.097391 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.097415 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.097433 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.200773 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.200848 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.200870 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.200894 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.200912 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.305719 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.305790 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.305823 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.305859 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.305880 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.409933 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.409977 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.409994 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.410018 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.410035 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.506506 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.506574 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:27:10 crc kubenswrapper[4871]: E1126 05:27:10.506677 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.506719 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.506506 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:27:10 crc kubenswrapper[4871]: E1126 05:27:10.507003 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:27:10 crc kubenswrapper[4871]: E1126 05:27:10.507079 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:27:10 crc kubenswrapper[4871]: E1126 05:27:10.507030 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.512148 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.512186 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.512198 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.512215 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.512227 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.564547 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.564595 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.564605 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.564641 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.564653 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:10 crc kubenswrapper[4871]: E1126 05:27:10.580222 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.585200 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.585240 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.585249 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.585280 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.585290 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:10 crc kubenswrapper[4871]: E1126 05:27:10.605145 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.610179 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.610240 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.610264 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.610293 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.610318 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:10 crc kubenswrapper[4871]: E1126 05:27:10.632815 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.638219 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.638263 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.638275 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.638294 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.638309 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:10 crc kubenswrapper[4871]: E1126 05:27:10.654319 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.658989 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.659015 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.659025 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.659041 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.659053 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:10 crc kubenswrapper[4871]: E1126 05:27:10.678597 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:10 crc kubenswrapper[4871]: E1126 05:27:10.678707 4871 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.680488 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.680640 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.680687 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.680717 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.680740 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.783293 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.783353 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.783371 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.783396 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.783413 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.887144 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.887243 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.887264 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.887295 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.887315 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.990232 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.990313 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.990335 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.990365 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:10 crc kubenswrapper[4871]: I1126 05:27:10.990386 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:10Z","lastTransitionTime":"2025-11-26T05:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.093482 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.093595 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.093622 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.093652 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.093676 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:11Z","lastTransitionTime":"2025-11-26T05:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.196882 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.196958 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.196982 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.197011 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.197033 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:11Z","lastTransitionTime":"2025-11-26T05:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.299866 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.299960 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.299978 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.300001 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.300019 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:11Z","lastTransitionTime":"2025-11-26T05:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.403057 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.403116 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.403133 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.403158 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.403177 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:11Z","lastTransitionTime":"2025-11-26T05:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.506871 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.506919 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.506943 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.506971 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.506992 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:11Z","lastTransitionTime":"2025-11-26T05:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.609641 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.609704 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.609723 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.609748 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.609766 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:11Z","lastTransitionTime":"2025-11-26T05:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.713028 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.713092 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.713112 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.713138 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.713156 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:11Z","lastTransitionTime":"2025-11-26T05:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.816503 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.816601 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.816624 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.816650 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.816667 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:11Z","lastTransitionTime":"2025-11-26T05:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.919449 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.919509 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.919555 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.919583 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:11 crc kubenswrapper[4871]: I1126 05:27:11.919600 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:11Z","lastTransitionTime":"2025-11-26T05:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.023007 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.023086 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.023105 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.023130 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.023147 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:12Z","lastTransitionTime":"2025-11-26T05:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.125814 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.125901 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.125919 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.125940 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.125955 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:12Z","lastTransitionTime":"2025-11-26T05:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.229719 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.229781 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.229801 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.229879 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.229908 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:12Z","lastTransitionTime":"2025-11-26T05:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.332688 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.332750 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.332769 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.332794 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.332811 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:12Z","lastTransitionTime":"2025-11-26T05:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.436131 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.436244 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.436261 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.436290 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.436308 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:12Z","lastTransitionTime":"2025-11-26T05:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.507097 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.507154 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.507237 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:12 crc kubenswrapper[4871]: E1126 05:27:12.507447 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.507517 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:12 crc kubenswrapper[4871]: E1126 05:27:12.507711 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:12 crc kubenswrapper[4871]: E1126 05:27:12.507897 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:12 crc kubenswrapper[4871]: E1126 05:27:12.508076 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.528315 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:50Z\\\",\\\"message\\\":\\\"2025-11-26T05:26:05+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5\\\\n2025-11-26T05:26:05+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5 to /host/opt/cni/bin/\\\\n2025-11-26T05:26:05Z [verbose] multus-daemon started\\\\n2025-11-26T05:26:05Z [verbose] Readiness Indicator file check\\\\n2025-11-26T05:26:50Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.539635 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.540222 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.540347 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.540995 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.541104 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:12Z","lastTransitionTime":"2025-11-26T05:27:12Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.546419 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.572915 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.589459 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3606989b-2f43-46e5-a90d-7fcfa83a970d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c147cae302ede8de5204573d8405e9aee2503d957606138e742af17dfd03f6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.611962 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.634256 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.644810 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.644884 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.644926 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.644960 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.644981 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:12Z","lastTransitionTime":"2025-11-26T05:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.666009 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:27:02Z\\\",\\\"message\\\":\\\"ssip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 05:27:02.405821 6869 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 05:27:02.405841 6869 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 05:27:02.405880 6869 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 05:27:02.405877 6869 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 05:27:02.405910 6869 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 05:27:02.405896 6869 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 05:27:02.405948 6869 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 05:27:02.406002 6869 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 05:27:02.406001 6869 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 05:27:02.406024 6869 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 05:27:02.406037 6869 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 05:27:02.406044 6869 factory.go:656] Stopping watch factory\\\\nI1126 05:27:02.406057 6869 ovnkube.go:599] Stopped ovnkube\\\\nI1126 05:27:02.406103 6869 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 05:27:02.406123 6869 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 05:27:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:27:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z"
Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.685136 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z"
Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.710698 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\
\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2
eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05
:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.727686 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"ho
stIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.748101 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.748179 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.748204 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.748239 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.748263 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:12Z","lastTransitionTime":"2025-11-26T05:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.749638 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"012685e4-7f48-4dc5-8c32-b4acd0ba0788\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e936d7790749be736341822bb370fc8729d1e006bffe538ff480a090b856cce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1e537a2837f366cb6a6343ffdcf998611f07d8c19f4fe9c0111862520ebbe5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f55fc830fdd852727a8ac6714209b06ef8394a19d313752c316fd0901a47f2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.769833 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.789897 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.807312 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.825615 4871 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358
e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.846362 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.851244 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.851318 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.851341 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.851373 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.851395 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:12Z","lastTransitionTime":"2025-11-26T05:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.865281 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.881607 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.954681 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.954737 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.954756 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.954778 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:12 crc kubenswrapper[4871]: I1126 05:27:12.954795 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:12Z","lastTransitionTime":"2025-11-26T05:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.058620 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.058681 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.058704 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.058734 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.058948 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:13Z","lastTransitionTime":"2025-11-26T05:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.161656 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.161758 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.161784 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.161819 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.161839 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:13Z","lastTransitionTime":"2025-11-26T05:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.265354 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.265415 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.265436 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.265461 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.265479 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:13Z","lastTransitionTime":"2025-11-26T05:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.369039 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.369098 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.369117 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.369143 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.369161 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:13Z","lastTransitionTime":"2025-11-26T05:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.472003 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.472050 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.472067 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.472089 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.472105 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:13Z","lastTransitionTime":"2025-11-26T05:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.575865 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.575936 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.575959 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.575990 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.576011 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:13Z","lastTransitionTime":"2025-11-26T05:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.678815 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.678885 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.678910 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.678940 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.678961 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:13Z","lastTransitionTime":"2025-11-26T05:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.781755 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.781823 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.781846 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.781874 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.781896 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:13Z","lastTransitionTime":"2025-11-26T05:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.886646 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.886735 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.886762 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.886794 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.886827 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:13Z","lastTransitionTime":"2025-11-26T05:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.988993 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.989077 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.989102 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.989131 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:13 crc kubenswrapper[4871]: I1126 05:27:13.989153 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:13Z","lastTransitionTime":"2025-11-26T05:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.092337 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.092397 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.092414 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.092441 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.092459 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:14Z","lastTransitionTime":"2025-11-26T05:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.195224 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.195297 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.195316 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.195341 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.195358 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:14Z","lastTransitionTime":"2025-11-26T05:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.298336 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.298411 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.298434 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.298465 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.298484 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:14Z","lastTransitionTime":"2025-11-26T05:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.401008 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.401073 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.401095 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.401124 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.401141 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:14Z","lastTransitionTime":"2025-11-26T05:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.503684 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.503735 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.503751 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.503774 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.503792 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:14Z","lastTransitionTime":"2025-11-26T05:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.506289 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.506319 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:14 crc kubenswrapper[4871]: E1126 05:27:14.506433 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.506554 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.506619 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:14 crc kubenswrapper[4871]: E1126 05:27:14.506585 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:14 crc kubenswrapper[4871]: E1126 05:27:14.506762 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:14 crc kubenswrapper[4871]: E1126 05:27:14.506818 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.606623 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.606690 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.606707 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.606731 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.606748 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:14Z","lastTransitionTime":"2025-11-26T05:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.709964 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.710027 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.710044 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.710068 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.710087 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:14Z","lastTransitionTime":"2025-11-26T05:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.813571 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.813645 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.813666 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.813692 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.813709 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:14Z","lastTransitionTime":"2025-11-26T05:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.916809 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.916863 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.916879 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.916902 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:14 crc kubenswrapper[4871]: I1126 05:27:14.916919 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:14Z","lastTransitionTime":"2025-11-26T05:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.019663 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.019701 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.019711 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.019728 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.019741 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:15Z","lastTransitionTime":"2025-11-26T05:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
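Annotation: while the CNI config is absent, the same five-entry block (four node events plus a setters.go "Node became not ready" condition) repeats roughly every 100 ms. A throwaway parser for condensing a capture like this one, assuming the exact line format shown here ("kubelet.log" is a placeholder path):

    # Sketch: count the recurring node events and NotReady reasons in a
    # kubelet log capture; handles multiple entries fused onto one line.
    import re
    from collections import Counter

    EVENT_RE = re.compile(r'"Recording event message for node" node="([^"]+)" event="([^"]+)"')
    REASON_RE = re.compile(r'"Node became not ready" node="[^"]+" condition=.*?"reason":"([^"]+)"')

    events, reasons = Counter(), Counter()
    with open("kubelet.log", encoding="utf-8", errors="replace") as fh:
        for line in fh:
            for _node, event in EVENT_RE.findall(line):
                events[event] += 1
            for reason in REASON_RE.findall(line):
                reasons[reason] += 1

    print("node events:     ", dict(events))
    print("NotReady reasons:", dict(reasons))

On this capture the output should show NodeNotReady and KubeletNotReady counts climbing in lockstep, one pair per heartbeat block.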
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.122153 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.122199 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.122217 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.122241 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.122257 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:15Z","lastTransitionTime":"2025-11-26T05:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.226376 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.226466 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.226557 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.226590 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.226611 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:15Z","lastTransitionTime":"2025-11-26T05:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.329648 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.329706 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.329723 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.329747 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.329766 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:15Z","lastTransitionTime":"2025-11-26T05:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.432813 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.432859 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.432871 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.432889 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.432902 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:15Z","lastTransitionTime":"2025-11-26T05:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.535278 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.535344 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.535361 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.535388 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.535405 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:15Z","lastTransitionTime":"2025-11-26T05:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.639394 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.639452 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.639470 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.639493 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.639507 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:15Z","lastTransitionTime":"2025-11-26T05:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.743408 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.743470 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.743488 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.743512 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.743556 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:15Z","lastTransitionTime":"2025-11-26T05:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.846894 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.846981 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.847000 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.847027 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.847046 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:15Z","lastTransitionTime":"2025-11-26T05:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.949955 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.950012 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.950031 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.950054 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:15 crc kubenswrapper[4871]: I1126 05:27:15.950073 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:15Z","lastTransitionTime":"2025-11-26T05:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.053505 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.053603 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.053621 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.053644 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.053665 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:16Z","lastTransitionTime":"2025-11-26T05:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.156910 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.156972 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.156990 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.157016 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.157038 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:16Z","lastTransitionTime":"2025-11-26T05:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.259996 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.260052 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.260068 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.260092 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.260109 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:16Z","lastTransitionTime":"2025-11-26T05:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.363194 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.363256 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.363273 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.363296 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.363317 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:16Z","lastTransitionTime":"2025-11-26T05:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.466664 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.466712 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.466731 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.466764 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.466782 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:16Z","lastTransitionTime":"2025-11-26T05:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.506684 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.506956 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:16 crc kubenswrapper[4871]: E1126 05:27:16.508033 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.508318 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.508388 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:27:16 crc kubenswrapper[4871]: E1126 05:27:16.509089 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:27:16 crc kubenswrapper[4871]: E1126 05:27:16.510507 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:27:16 crc kubenswrapper[4871]: E1126 05:27:16.510834 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.513312 4871 scope.go:117] "RemoveContainer" containerID="165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef"
Nov 26 05:27:16 crc kubenswrapper[4871]: E1126 05:27:16.514204 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8"
Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.570491 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.570590 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.570612 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.570644 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.570662 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:16Z","lastTransitionTime":"2025-11-26T05:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
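Annotation: the "back-off 40s" above is the kubelet's standard crash-loop delay for ovnkube-controller. Under the upstream defaults the delay starts at 10s, doubles per restart, and caps at five minutes, so 40s corresponds to the third restart. A sketch of that schedule, assuming those defaults:

    # Sketch: reproduce the CrashLoopBackOff delay sequence the kubelet
    # applies by default (10s base, doubling, capped at 5 minutes).
    BASE_S, CAP_S = 10, 300

    delay = BASE_S
    for restart in range(1, 8):
        print(f"restart #{restart}: wait {delay}s")  # 10, 20, 40, 80, 160, 300
        if delay == CAP_S:
            break
        delay = min(delay * 2, CAP_S)

Once ovnkube-controller stays up, it should write the CNI config into /etc/kubernetes/cni/net.d/, which would end both the NotReady heartbeats and the "Error syncing pod" entries in this capture.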
Has your network provider started?"} Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.673964 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.674020 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.674056 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.674085 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.674108 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:16Z","lastTransitionTime":"2025-11-26T05:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.777881 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.777918 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.777933 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.777952 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.777962 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:16Z","lastTransitionTime":"2025-11-26T05:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.880836 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.880910 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.880927 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.880954 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.880972 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:16Z","lastTransitionTime":"2025-11-26T05:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.984045 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.984095 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.984113 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.984146 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:16 crc kubenswrapper[4871]: I1126 05:27:16.984164 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:16Z","lastTransitionTime":"2025-11-26T05:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.233070 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.233102 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.233111 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.233124 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.233133 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:17Z","lastTransitionTime":"2025-11-26T05:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.335393 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.335458 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.335480 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.335509 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.335565 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:17Z","lastTransitionTime":"2025-11-26T05:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.439823 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.439907 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.439932 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.439961 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.439984 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:17Z","lastTransitionTime":"2025-11-26T05:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.543470 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.543564 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.543619 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.543646 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.543664 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:17Z","lastTransitionTime":"2025-11-26T05:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.645358 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.645404 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.645417 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.645434 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.645447 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:17Z","lastTransitionTime":"2025-11-26T05:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.748163 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.748226 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.748243 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.748267 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.748284 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:17Z","lastTransitionTime":"2025-11-26T05:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.851349 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.851411 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.851428 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.851453 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.851471 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:17Z","lastTransitionTime":"2025-11-26T05:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.955159 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.955220 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.955239 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.955265 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:17 crc kubenswrapper[4871]: I1126 05:27:17.955285 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:17Z","lastTransitionTime":"2025-11-26T05:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.058630 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.058693 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.058710 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.058735 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.058754 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:18Z","lastTransitionTime":"2025-11-26T05:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.161695 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.161780 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.161805 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.161835 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.161857 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:18Z","lastTransitionTime":"2025-11-26T05:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.265723 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.265789 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.265806 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.265832 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.265855 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:18Z","lastTransitionTime":"2025-11-26T05:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.369042 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.369151 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.369221 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.369254 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.369277 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:18Z","lastTransitionTime":"2025-11-26T05:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.474655 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.474814 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.474882 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.474908 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.474926 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:18Z","lastTransitionTime":"2025-11-26T05:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.507169 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.507269 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.507269 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:18 crc kubenswrapper[4871]: E1126 05:27:18.507434 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.507456 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:18 crc kubenswrapper[4871]: E1126 05:27:18.507638 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:18 crc kubenswrapper[4871]: E1126 05:27:18.507867 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:18 crc kubenswrapper[4871]: E1126 05:27:18.508197 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.577906 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.578069 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.578094 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.578119 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.578137 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:18Z","lastTransitionTime":"2025-11-26T05:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.681630 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.681747 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.681767 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.681790 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.681807 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:18Z","lastTransitionTime":"2025-11-26T05:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.785058 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.785139 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.785162 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.785192 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.785212 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:18Z","lastTransitionTime":"2025-11-26T05:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.887719 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.887797 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.887815 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.887839 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.887858 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:18Z","lastTransitionTime":"2025-11-26T05:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.991188 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.991256 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.991276 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.991300 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:18 crc kubenswrapper[4871]: I1126 05:27:18.991317 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:18Z","lastTransitionTime":"2025-11-26T05:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.094333 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.094387 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.094406 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.094428 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.094445 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:19Z","lastTransitionTime":"2025-11-26T05:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.196870 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.196915 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.196931 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.196953 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.196969 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:19Z","lastTransitionTime":"2025-11-26T05:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.299687 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.299734 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.299751 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.299777 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.299794 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:19Z","lastTransitionTime":"2025-11-26T05:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.402276 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.402333 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.402356 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.402381 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.402404 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:19Z","lastTransitionTime":"2025-11-26T05:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.505106 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.505166 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.505184 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.505207 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.505227 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:19Z","lastTransitionTime":"2025-11-26T05:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.607896 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.607944 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.607960 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.607984 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.608003 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:19Z","lastTransitionTime":"2025-11-26T05:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.711086 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.711139 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.711158 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.711182 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.711198 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:19Z","lastTransitionTime":"2025-11-26T05:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.813975 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.814038 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.814056 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.814080 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.814098 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:19Z","lastTransitionTime":"2025-11-26T05:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.918657 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.918734 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.918754 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.918781 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:19 crc kubenswrapper[4871]: I1126 05:27:19.918807 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:19Z","lastTransitionTime":"2025-11-26T05:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.022057 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.022096 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.022104 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.022118 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.022127 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.125258 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.125341 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.125374 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.125402 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.125422 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.228765 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.228820 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.228837 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.228860 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.228876 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.332041 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.332095 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.332111 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.332133 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.332150 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.434766 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.434815 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.434833 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.434856 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.434874 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.506427 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.506437 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.506675 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.506909 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.507052 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.507263 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.507568 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.507733 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.538041 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.538103 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.538125 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.538150 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.538168 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.640994 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.641053 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.641075 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.641099 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.641116 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.720707 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.720761 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.720828 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.720851 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.720906 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.737404 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.742508 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.742604 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.742625 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.742652 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.742672 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.763720 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.769122 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.769248 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.769268 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.769293 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.769311 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.793013 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.798650 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.798716 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.798734 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.798758 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.798776 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.819915 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.823613 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.823655 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.823666 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.823683 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.823696 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.839708 4871 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T05:27:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"4a642753-bf13-4675-b42b-d7df47f40ffd\\\",\\\"systemUUID\\\":\\\"f5747dac-e851-4efb-9d51-1bea82126d22\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.839990 4871 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.841952 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.842020 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.842042 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.842067 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.842085 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.871006 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.871204 4871 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 05:27:20 crc kubenswrapper[4871]: E1126 05:27:20.871330 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs podName:30b3c82b-ca2a-4821-86e0-94aa2afce847 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:24.871300919 +0000 UTC m=+163.054352545 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs") pod "network-metrics-daemon-z2d5h" (UID: "30b3c82b-ca2a-4821-86e0-94aa2afce847") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.944709 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.944806 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.944823 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.944848 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:20 crc kubenswrapper[4871]: I1126 05:27:20.944866 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:20Z","lastTransitionTime":"2025-11-26T05:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.047739 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.047815 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.047827 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.047844 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.047856 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:21Z","lastTransitionTime":"2025-11-26T05:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.150921 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.151005 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.151027 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.151079 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.151104 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:21Z","lastTransitionTime":"2025-11-26T05:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.253604 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.253661 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.253678 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.253703 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.253720 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:21Z","lastTransitionTime":"2025-11-26T05:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.357211 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.357274 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.357292 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.357315 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.357333 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:21Z","lastTransitionTime":"2025-11-26T05:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.460939 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.460995 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.461012 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.461039 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.461058 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:21Z","lastTransitionTime":"2025-11-26T05:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.563732 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.563798 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.563816 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.563845 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.563862 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:21Z","lastTransitionTime":"2025-11-26T05:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.666431 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.666477 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.666489 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.666506 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.666519 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:21Z","lastTransitionTime":"2025-11-26T05:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.769772 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.769834 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.769851 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.769874 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.769889 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:21Z","lastTransitionTime":"2025-11-26T05:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.873147 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.873184 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.873192 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.873206 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.873215 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:21Z","lastTransitionTime":"2025-11-26T05:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.975624 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.975669 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.975684 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.975704 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:21 crc kubenswrapper[4871]: I1126 05:27:21.975717 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:21Z","lastTransitionTime":"2025-11-26T05:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.078860 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.078903 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.078919 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.078940 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.078956 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:22Z","lastTransitionTime":"2025-11-26T05:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.181741 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.181813 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.181830 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.181853 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.181871 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:22Z","lastTransitionTime":"2025-11-26T05:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.284741 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.284816 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.284840 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.284870 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.284892 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:22Z","lastTransitionTime":"2025-11-26T05:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.388378 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.388507 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.388576 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.388612 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.388641 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:22Z","lastTransitionTime":"2025-11-26T05:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.491221 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.491289 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.491308 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.491331 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.491348 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:22Z","lastTransitionTime":"2025-11-26T05:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.506976 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.507111 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.507110 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:22 crc kubenswrapper[4871]: E1126 05:27:22.507217 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.507263 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:22 crc kubenswrapper[4871]: E1126 05:27:22.507972 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:22 crc kubenswrapper[4871]: E1126 05:27:22.508178 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:22 crc kubenswrapper[4871]: E1126 05:27:22.508279 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.529236 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.529491 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2510b0bec5aea151b0dcb2e45515e6bef3fffb777bd2ee061699d0fbd3a6e4d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9722435e9daa7510b5b8ea902b62d6c25e6c7732b9f5ed25f0431c7f99ec1d30\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 
05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.546825 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30b3c82b-ca2a-4821-86e0-94aa2afce847\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7h6n4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:16Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-z2d5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.566756 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f310d656-f0c7-4be1-b0c1-47eb2d06fbcd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaab1f1766238e9376791d2b54ca754b6fe49a00f435f0012e34d83043e9f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb2dc5aab04648b97225b40087bb1bbf8f2e092e64c33dbc238e7e132103acbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a790ce9724e3fb06d3f33dfb16ef778028940679653dad2358e65cb780e9e144\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.586464 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.594462 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.594572 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.594608 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.594639 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.594660 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:22Z","lastTransitionTime":"2025-11-26T05:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.605973 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3cd6a6d4-9b5f-4d27-a839-d37960bff02c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4a608d6c25f0ef05127f669c29f3cc05598cd0012fab4ba509934af7b41899c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-t2nq7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-zmlz2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.623652 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rpr6z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84290973-bc95-4326-bacd-7c210346620a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:26:50Z\\\",\\\"message\\\":\\\"2025-11-26T05:26:05+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5\\\\n2025-11-26T05:26:05+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_af7260e1-7a21-40cc-8937-d6659f8aa1c5 to /host/opt/cni/bin/\\\\n2025-11-26T05:26:05Z [verbose] multus-daemon started\\\\n2025-11-26T05:26:05Z [verbose] Readiness Indicator file check\\\\n2025-11-26T05:26:50Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mshq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rpr6z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.643405 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce8bd04c2eb8dd6a098e76225d777dcd9e37a3a3810bab991256e853d0f66e9a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.675018 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a0aba42-7edc-4d81-850e-3e3439eeaec8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://165ef93f065973ad6c896ad290235fa4a66d891d
5829d3e2d689c49f7e8951ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T05:27:02Z\\\",\\\"message\\\":\\\"ssip/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 05:27:02.405821 6869 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 05:27:02.405841 6869 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 05:27:02.405880 6869 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 05:27:02.405877 6869 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 05:27:02.405910 6869 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 05:27:02.405896 6869 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 05:27:02.405948 6869 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 05:27:02.406002 6869 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 05:27:02.406001 6869 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 05:27:02.406024 6869 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 05:27:02.406037 6869 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 05:27:02.406044 6869 factory.go:656] Stopping watch factory\\\\nI1126 05:27:02.406057 6869 ovnkube.go:599] Stopped ovnkube\\\\nI1126 05:27:02.406103 6869 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 05:27:02.406123 6869 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 05:27:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:27:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9nzm5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qzw7d\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.690132 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vhnk4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4967e853-6782-4ec9-bd03-6a98f803c1a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e528f10d2afebdd68961fc8989125f9b552a3f026de386cf91863f169d95bd5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58wsv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:05Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vhnk4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.697264 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.697306 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.697317 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.697385 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.697432 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:22Z","lastTransitionTime":"2025-11-26T05:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.703227 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3606989b-2f43-46e5-a90d-7fcfa83a970d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c147cae302ede8de5204573d8405e9aee2503d957606138e742af17dfd03f6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90c79989afa2f6c3e041d1e3ed287d4a40bb56d0af80f648872bbd809216de4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.720640 4871 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a2d2c7b-d0d7-40ac-b144-caf1cefe0993\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://284812a42
eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 05:25:56.299434 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 05:25:56.301231 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2674744984/tls.crt::/tmp/serving-cert-2674744984/tls.key\\\\\\\"\\\\nI1126 05:26:02.089267 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 05:26:02.098317 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 05:26:02.098354 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 05:26:02.098396 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 05:26:02.098406 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 05:26:02.108518 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 05:26:02.108599 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI1126 05:26:02.108591 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1126 05:26:02.108612 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 05:26:02.108637 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 05:26:02.108645 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 05:26:02.108657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 05:26:02.108665 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1126 05:26:02.109633 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.738620 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.755104 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8c1387e7140eaf2e090028e18b82e7dc0da93d2355b7bccd8beb62428993f3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.770491 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-2jk6j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e5b85376-eda9-4770-ad55-b7a59a00e3f3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d4da28c37f95ead390bd655cff933f717fdaef9cb64fb8da2143d444aa1040a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-85lq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:02Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-2jk6j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.793115 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-4scr4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06cf5a9c-035f-4d53-b7e6-e6ecaa6e4a3c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://145005b70db6b025e819ef4d75c805d4b3b99886700f89720055fc5fbfeba249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dc161c70ab64f6275b91264ec7cbc1418c0529159191010e1e5b44c1f9adc178\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0009c8e5ffa1c2a089581b88fcdbe9157ca17d6b49cb230ef8c1ccc2ca9baffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f18cf500a3b01654cda398116bcdc518a256746a24a99baa73b534bc7f2ce7d0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a1083abf66e2cae37391ab3102d5e906550ee299edb9da2e08b8c81c6636c91\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ad3d79248a54d2e5634d084df4d7e1f7ca395355709c2c2020729f5c99f84aa3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://22b5ee8a5368bf5f97c3ea7add8c55677f97b7cbb9497258b26e369a55a98ca8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:26:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:26:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5mg9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:03Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-4scr4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.802286 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.802333 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:22 crc 
kubenswrapper[4871]: I1126 05:27:22.802351 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.802375 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.802394 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:22Z","lastTransitionTime":"2025-11-26T05:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.811995 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5ebf7372-f87d-40b5-ab3b-52fc9622ff3a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc94d0cf979cf21c04eefe998ba381ea922779f079981532be41e154faa17e3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:26:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://735bd64ef711a24fc257d682557fc22bb34bb80f5a0ed913d575ce78a84cb01c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:2
6:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6brdl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:26:15Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7g5pz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.830138 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"012685e4-7f48-4dc5-8c32-b4acd0ba0788\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T05:25:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e936d7790749be736341822bb370fc8729d1e006bffe538ff480a090b856cce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb1e537a2837f366cb6a6343ffdcf998611f07d8c19f4fe9c0111862520ebbe5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f55fc830fdd852727a8ac6714209b06ef8394a19d313752c316fd0901a47f2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T05:25:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c0bbaa7461a8f3a798409d28c3f0223e4f6162a100a22c48594c24d1b165c246\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T05:25:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T05:25:43Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T05:25:42Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.853803 4871 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T05:26:02Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T05:27:22Z is after 2025-08-24T17:21:41Z" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.905564 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.905605 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.905618 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.905637 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:22 crc kubenswrapper[4871]: I1126 05:27:22.905649 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:22Z","lastTransitionTime":"2025-11-26T05:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.008954 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.009004 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.009026 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.009046 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.009059 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:23Z","lastTransitionTime":"2025-11-26T05:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.112360 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.112426 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.112445 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.112468 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.112485 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:23Z","lastTransitionTime":"2025-11-26T05:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.215565 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.215624 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.215641 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.215665 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.215684 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:23Z","lastTransitionTime":"2025-11-26T05:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.318319 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.318392 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.318416 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.318439 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.318457 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:23Z","lastTransitionTime":"2025-11-26T05:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.421445 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.421504 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.421629 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.421663 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.421690 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:23Z","lastTransitionTime":"2025-11-26T05:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.524700 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.524766 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.524788 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.524818 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.524839 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:23Z","lastTransitionTime":"2025-11-26T05:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.627806 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.627912 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.627932 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.627955 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.627971 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:23Z","lastTransitionTime":"2025-11-26T05:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.730331 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.730401 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.730424 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.730452 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.730474 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:23Z","lastTransitionTime":"2025-11-26T05:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.839513 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.839617 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.839637 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.839665 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.839688 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:23Z","lastTransitionTime":"2025-11-26T05:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.942240 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.942278 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.942289 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.942306 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:23 crc kubenswrapper[4871]: I1126 05:27:23.942318 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:23Z","lastTransitionTime":"2025-11-26T05:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.044791 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.044831 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.044842 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.044858 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.044870 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:24Z","lastTransitionTime":"2025-11-26T05:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.149504 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.149591 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.149608 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.149635 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.149652 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:24Z","lastTransitionTime":"2025-11-26T05:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.253019 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.253094 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.253114 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.253138 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.253157 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:24Z","lastTransitionTime":"2025-11-26T05:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.355636 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.355732 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.355756 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.355784 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.355806 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:24Z","lastTransitionTime":"2025-11-26T05:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.459295 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.459346 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.459363 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.459386 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.459403 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:24Z","lastTransitionTime":"2025-11-26T05:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.507466 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.507489 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.507515 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:24 crc kubenswrapper[4871]: E1126 05:27:24.507757 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.507835 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:24 crc kubenswrapper[4871]: E1126 05:27:24.507994 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:24 crc kubenswrapper[4871]: E1126 05:27:24.508130 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:24 crc kubenswrapper[4871]: E1126 05:27:24.508351 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.561862 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.561899 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.561916 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.561936 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.561954 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:24Z","lastTransitionTime":"2025-11-26T05:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.665593 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.665687 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.665794 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.665842 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.665868 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:24Z","lastTransitionTime":"2025-11-26T05:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.769125 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.769188 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.769205 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.769230 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.769248 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:24Z","lastTransitionTime":"2025-11-26T05:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.871655 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.871704 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.871719 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.871738 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.871753 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:24Z","lastTransitionTime":"2025-11-26T05:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.975662 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.975712 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.975724 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.975743 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:24 crc kubenswrapper[4871]: I1126 05:27:24.975755 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:24Z","lastTransitionTime":"2025-11-26T05:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.079483 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.079642 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.079671 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.079714 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.079735 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:25Z","lastTransitionTime":"2025-11-26T05:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.183071 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.183138 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.183156 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.183183 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.183203 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:25Z","lastTransitionTime":"2025-11-26T05:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.285639 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.285691 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.285701 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.285717 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.285726 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:25Z","lastTransitionTime":"2025-11-26T05:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.388974 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.389024 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.389041 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.389064 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.389081 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:25Z","lastTransitionTime":"2025-11-26T05:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.492096 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.492172 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.492196 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.492229 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.492250 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:25Z","lastTransitionTime":"2025-11-26T05:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.595633 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.595697 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.595717 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.595739 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.595755 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:25Z","lastTransitionTime":"2025-11-26T05:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.698743 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.698812 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.698833 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.698863 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.698885 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:25Z","lastTransitionTime":"2025-11-26T05:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.801947 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.801996 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.802012 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.802033 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.802051 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:25Z","lastTransitionTime":"2025-11-26T05:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.905257 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.905307 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.905324 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.905348 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:25 crc kubenswrapper[4871]: I1126 05:27:25.905366 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:25Z","lastTransitionTime":"2025-11-26T05:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.008056 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.008128 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.008146 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.008172 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.008192 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:26Z","lastTransitionTime":"2025-11-26T05:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.111386 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.111438 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.111457 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.111481 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.111499 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:26Z","lastTransitionTime":"2025-11-26T05:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.214878 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.214937 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.214954 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.214976 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.214994 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:26Z","lastTransitionTime":"2025-11-26T05:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.318312 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.318411 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.318428 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.318453 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.318470 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:26Z","lastTransitionTime":"2025-11-26T05:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.421783 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.421854 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.421872 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.421899 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.421919 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:26Z","lastTransitionTime":"2025-11-26T05:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.506718 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.506808 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:26 crc kubenswrapper[4871]: E1126 05:27:26.506860 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.506885 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.507035 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:26 crc kubenswrapper[4871]: E1126 05:27:26.507138 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:26 crc kubenswrapper[4871]: E1126 05:27:26.507253 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:26 crc kubenswrapper[4871]: E1126 05:27:26.507346 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.524489 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.524632 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.524658 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.524685 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.524704 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:26Z","lastTransitionTime":"2025-11-26T05:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.627936 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.627994 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.628011 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.628035 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.628054 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:26Z","lastTransitionTime":"2025-11-26T05:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.730735 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.730803 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.730819 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.730846 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.730864 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:26Z","lastTransitionTime":"2025-11-26T05:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.834028 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.834065 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.834073 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.834087 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.834095 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:26Z","lastTransitionTime":"2025-11-26T05:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.936928 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.936961 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.936971 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.936985 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:26 crc kubenswrapper[4871]: I1126 05:27:26.936994 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:26Z","lastTransitionTime":"2025-11-26T05:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.039745 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.039805 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.039828 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.039851 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.039868 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:27Z","lastTransitionTime":"2025-11-26T05:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.143617 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.143713 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.143742 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.143774 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.143797 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:27Z","lastTransitionTime":"2025-11-26T05:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.246999 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.247074 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.247097 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.247222 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.247242 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:27Z","lastTransitionTime":"2025-11-26T05:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.350434 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.350517 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.350571 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.350594 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.350613 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:27Z","lastTransitionTime":"2025-11-26T05:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.453942 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.454023 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.454040 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.454068 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.454090 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:27Z","lastTransitionTime":"2025-11-26T05:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.508319 4871 scope.go:117] "RemoveContainer" containerID="165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef" Nov 26 05:27:27 crc kubenswrapper[4871]: E1126 05:27:27.508602 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.557717 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.557783 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.557801 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.557827 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.557844 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:27Z","lastTransitionTime":"2025-11-26T05:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.661242 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.661310 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.661334 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.661365 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.661388 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:27Z","lastTransitionTime":"2025-11-26T05:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.765180 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.765237 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.765259 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.765293 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.765316 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:27Z","lastTransitionTime":"2025-11-26T05:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.868612 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.868681 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.868699 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.868724 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.868743 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:27Z","lastTransitionTime":"2025-11-26T05:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.971935 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.971995 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.972012 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.972038 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:27 crc kubenswrapper[4871]: I1126 05:27:27.972058 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:27Z","lastTransitionTime":"2025-11-26T05:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.076128 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.076192 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.076218 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.076249 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.076275 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:28Z","lastTransitionTime":"2025-11-26T05:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.179610 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.179675 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.179693 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.179718 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.179739 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:28Z","lastTransitionTime":"2025-11-26T05:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.282331 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.282389 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.282406 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.282431 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.282448 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:28Z","lastTransitionTime":"2025-11-26T05:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.386135 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.386219 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.386245 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.386279 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.386300 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:28Z","lastTransitionTime":"2025-11-26T05:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.489038 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.489096 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.489114 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.489138 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.489157 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:28Z","lastTransitionTime":"2025-11-26T05:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.507062 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.507172 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:27:28 crc kubenswrapper[4871]: E1126 05:27:28.507223 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.507246 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:27:28 crc kubenswrapper[4871]: E1126 05:27:28.507352 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.507401 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:27:28 crc kubenswrapper[4871]: E1126 05:27:28.507805 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:27:28 crc kubenswrapper[4871]: E1126 05:27:28.507947 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.592272 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.592352 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.592378 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.592423 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.592448 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:28Z","lastTransitionTime":"2025-11-26T05:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.695672 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.695766 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.695790 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.695819 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.695843 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:28Z","lastTransitionTime":"2025-11-26T05:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.798638 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.798702 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.798721 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.798746 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.798765 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:28Z","lastTransitionTime":"2025-11-26T05:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.902249 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.902321 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.902339 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.902975 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:28 crc kubenswrapper[4871]: I1126 05:27:28.903061 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:28Z","lastTransitionTime":"2025-11-26T05:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.007139 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.007197 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.007216 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.007239 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.007257 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:29Z","lastTransitionTime":"2025-11-26T05:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.109934 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.109980 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.109998 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.110020 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.110036 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:29Z","lastTransitionTime":"2025-11-26T05:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.213770 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.213816 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.213832 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.213854 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.213870 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:29Z","lastTransitionTime":"2025-11-26T05:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.317834 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.317898 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.317915 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.317937 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.317954 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:29Z","lastTransitionTime":"2025-11-26T05:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.421668 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.421734 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.421756 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.421786 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.421808 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:29Z","lastTransitionTime":"2025-11-26T05:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.524604 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.524659 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.524676 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.524699 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.524717 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:29Z","lastTransitionTime":"2025-11-26T05:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.627406 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.627456 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.627473 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.627495 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.627512 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:29Z","lastTransitionTime":"2025-11-26T05:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.730518 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.730691 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.730716 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.730743 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.730761 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:29Z","lastTransitionTime":"2025-11-26T05:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.833602 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.833667 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.833685 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.833712 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.833730 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:29Z","lastTransitionTime":"2025-11-26T05:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.937786 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.937918 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.937944 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.938024 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:29 crc kubenswrapper[4871]: I1126 05:27:29.938057 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:29Z","lastTransitionTime":"2025-11-26T05:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.041670 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.041735 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.041753 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.041778 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.041797 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:30Z","lastTransitionTime":"2025-11-26T05:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.145953 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.146028 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.146053 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.146082 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.146104 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:30Z","lastTransitionTime":"2025-11-26T05:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.249444 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.249621 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.249648 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.249680 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.249702 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:30Z","lastTransitionTime":"2025-11-26T05:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.352783 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.352862 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.352884 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.352915 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.352938 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:30Z","lastTransitionTime":"2025-11-26T05:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.455826 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.455896 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.455916 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.455940 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.455957 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:30Z","lastTransitionTime":"2025-11-26T05:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.506676 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.506783 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:27:30 crc kubenswrapper[4871]: E1126 05:27:30.506848 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.506676 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.506679 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:27:30 crc kubenswrapper[4871]: E1126 05:27:30.507016 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:27:30 crc kubenswrapper[4871]: E1126 05:27:30.507175 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:27:30 crc kubenswrapper[4871]: E1126 05:27:30.507356 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
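[Editor's note] Note which pods are being skipped: only the four that need a CNI-managed interface; the host-network pods in this log (etcd-crc, kube-apiserver-crc, ovnkube-node, ...) started long before. A sketch of that gate, with a pared-down hypothetical Pod type standing in for the kubelet's:

```go
package main

import (
	"errors"
	"fmt"
)

// Pod keeps only the field this gate inspects.
type Pod struct {
	Name        string
	HostNetwork bool
}

var errNetworkNotReady = errors.New(
	"network is not ready: container runtime network not ready: NetworkReady=false")

// canStartSandbox sketches the ordering visible in the log: pods on the
// host network don't need a CNI interface and proceed, while the rest
// are skipped each sync until NetworkReady flips to true.
func canStartSandbox(p Pod, networkReady bool) error {
	if !p.HostNetwork && !networkReady {
		return errNetworkNotReady
	}
	return nil
}

func main() {
	for _, p := range []Pod{
		{Name: "openshift-ovn-kubernetes/ovnkube-node-qzw7d", HostNetwork: true},
		{Name: "openshift-multus/network-metrics-daemon-z2d5h", HostNetwork: false},
	} {
		fmt.Println(p.Name, "->", canStartSandbox(p, false))
	}
}
```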
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.559504 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.559606 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.559626 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.559652 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.559674 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:30Z","lastTransitionTime":"2025-11-26T05:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.662670 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.662759 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.662785 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.662814 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.662840 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:30Z","lastTransitionTime":"2025-11-26T05:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.766730 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.766788 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.766809 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.766835 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.766856 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:30Z","lastTransitionTime":"2025-11-26T05:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.869698 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.869747 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.869764 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.869788 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.869805 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:30Z","lastTransitionTime":"2025-11-26T05:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.878129 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.878173 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.878190 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.878210 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.878226 4871 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T05:27:30Z","lastTransitionTime":"2025-11-26T05:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.954481 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"] Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.955103 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.958933 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.959113 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.959368 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 26 05:27:30 crc kubenswrapper[4871]: I1126 05:27:30.959470 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.018656 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podStartSLOduration=89.018635158 podStartE2EDuration="1m29.018635158s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:31.017586704 +0000 UTC m=+109.200638300" watchObservedRunningTime="2025-11-26 05:27:31.018635158 +0000 UTC m=+109.201686754" Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.019275 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-rpr6z" podStartSLOduration=89.019257093 podStartE2EDuration="1m29.019257093s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:30.980564031 +0000 UTC m=+109.163615627" watchObservedRunningTime="2025-11-26 05:27:31.019257093 +0000 UTC m=+109.202308689" Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.083751 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=9.083727376 podStartE2EDuration="9.083727376s" podCreationTimestamp="2025-11-26 05:27:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:31.058224032 +0000 UTC m=+109.241275658" watchObservedRunningTime="2025-11-26 05:27:31.083727376 +0000 UTC m=+109.266779002" Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.107058 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=89.10703304 podStartE2EDuration="1m29.10703304s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:31.083569243 +0000 UTC m=+109.266620899" watchObservedRunningTime="2025-11-26 05:27:31.10703304 +0000 UTC m=+109.290084666" Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.117442 4871 
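[Editor's note] The pod_startup_latency_tracker lines compute podStartSLOduration as observed-running time minus pod creation time, less any image-pull window (zero here: firstStartedPulling/lastFinishedPulling are zero-valued because the images were already present). Checking the machine-config-daemon number from its two logged timestamps:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps from the machine-config-daemon-zmlz2 record above:
	// podCreationTimestamp 05:26:02, observedRunningTime 05:27:31.017586704.
	created := time.Date(2025, 11, 26, 5, 26, 2, 0, time.UTC)
	running := time.Date(2025, 11, 26, 5, 27, 31, 17586704, time.UTC)
	fmt.Println(running.Sub(created))
	// Prints 1m29.017586704s, which tracks the logged 89.018635158s;
	// the ~1ms gap presumably comes from the tracker sampling the clock
	// again when it computes the SLO duration.
}
```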
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.117670 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8902e015-8aee-427a-b099-feb98ea85d69-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.117731 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8902e015-8aee-427a-b099-feb98ea85d69-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.117762 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8902e015-8aee-427a-b099-feb98ea85d69-service-ca\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.117835 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/8902e015-8aee-427a-b099-feb98ea85d69-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.208044 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-vhnk4" podStartSLOduration=89.208018115 podStartE2EDuration="1m29.208018115s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:31.191008598 +0000 UTC m=+109.374060184" watchObservedRunningTime="2025-11-26 05:27:31.208018115 +0000 UTC m=+109.391069701"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.208663 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=44.20865717 podStartE2EDuration="44.20865717s" podCreationTimestamp="2025-11-26 05:26:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:31.207088533 +0000 UTC m=+109.390140129" watchObservedRunningTime="2025-11-26 05:27:31.20865717 +0000 UTC m=+109.391708756"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.219118 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8902e015-8aee-427a-b099-feb98ea85d69-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.219170 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8902e015-8aee-427a-b099-feb98ea85d69-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.219191 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8902e015-8aee-427a-b099-feb98ea85d69-service-ca\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.219220 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/8902e015-8aee-427a-b099-feb98ea85d69-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.219284 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/8902e015-8aee-427a-b099-feb98ea85d69-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.219296 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8902e015-8aee-427a-b099-feb98ea85d69-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.219396 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/8902e015-8aee-427a-b099-feb98ea85d69-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.220286 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8902e015-8aee-427a-b099-feb98ea85d69-service-ca\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.227415 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8902e015-8aee-427a-b099-feb98ea85d69-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9"
\"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9" Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.236670 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8902e015-8aee-427a-b099-feb98ea85d69-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-lvtc9\" (UID: \"8902e015-8aee-427a-b099-feb98ea85d69\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9" Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.273957 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9" Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.281558 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-2jk6j" podStartSLOduration=89.281499888 podStartE2EDuration="1m29.281499888s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:31.280625608 +0000 UTC m=+109.463677204" watchObservedRunningTime="2025-11-26 05:27:31.281499888 +0000 UTC m=+109.464551474" Nov 26 05:27:31 crc kubenswrapper[4871]: W1126 05:27:31.292320 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8902e015_8aee_427a_b099_feb98ea85d69.slice/crio-1bf2904f6a74616897372e49b3a32fd7930405ba72feb555d3945fc6b1312850 WatchSource:0}: Error finding container 1bf2904f6a74616897372e49b3a32fd7930405ba72feb555d3945fc6b1312850: Status 404 returned error can't find the container with id 1bf2904f6a74616897372e49b3a32fd7930405ba72feb555d3945fc6b1312850 Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.320826 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-4scr4" podStartSLOduration=89.320804164 podStartE2EDuration="1m29.320804164s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:31.320466836 +0000 UTC m=+109.503518422" watchObservedRunningTime="2025-11-26 05:27:31.320804164 +0000 UTC m=+109.503855750" Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.346292 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7g5pz" podStartSLOduration=88.346269728 podStartE2EDuration="1m28.346269728s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:31.333527021 +0000 UTC m=+109.516578617" watchObservedRunningTime="2025-11-26 05:27:31.346269728 +0000 UTC m=+109.529321314" Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.377312 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=55.377283661 podStartE2EDuration="55.377283661s" podCreationTimestamp="2025-11-26 05:26:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:31.363201133 +0000 UTC m=+109.546252719" 
watchObservedRunningTime="2025-11-26 05:27:31.377283661 +0000 UTC m=+109.560335247" Nov 26 05:27:31 crc kubenswrapper[4871]: I1126 05:27:31.412382 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=85.412359199 podStartE2EDuration="1m25.412359199s" podCreationTimestamp="2025-11-26 05:26:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:31.412009831 +0000 UTC m=+109.595061427" watchObservedRunningTime="2025-11-26 05:27:31.412359199 +0000 UTC m=+109.595410795" Nov 26 05:27:32 crc kubenswrapper[4871]: I1126 05:27:32.292953 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9" event={"ID":"8902e015-8aee-427a-b099-feb98ea85d69","Type":"ContainerStarted","Data":"37340c0f3b5dcc1c23c1b426833386b97104805f0b290c669a791e02cac5f64a"} Nov 26 05:27:32 crc kubenswrapper[4871]: I1126 05:27:32.294169 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9" event={"ID":"8902e015-8aee-427a-b099-feb98ea85d69","Type":"ContainerStarted","Data":"1bf2904f6a74616897372e49b3a32fd7930405ba72feb555d3945fc6b1312850"} Nov 26 05:27:32 crc kubenswrapper[4871]: I1126 05:27:32.507379 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:32 crc kubenswrapper[4871]: I1126 05:27:32.507423 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:32 crc kubenswrapper[4871]: I1126 05:27:32.507522 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:32 crc kubenswrapper[4871]: I1126 05:27:32.510695 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:32 crc kubenswrapper[4871]: E1126 05:27:32.510681 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:32 crc kubenswrapper[4871]: E1126 05:27:32.510862 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:32 crc kubenswrapper[4871]: E1126 05:27:32.511105 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:32 crc kubenswrapper[4871]: E1126 05:27:32.511260 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:34 crc kubenswrapper[4871]: I1126 05:27:34.506611 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:34 crc kubenswrapper[4871]: I1126 05:27:34.506620 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:34 crc kubenswrapper[4871]: I1126 05:27:34.506648 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:34 crc kubenswrapper[4871]: I1126 05:27:34.507740 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:34 crc kubenswrapper[4871]: E1126 05:27:34.507964 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:34 crc kubenswrapper[4871]: E1126 05:27:34.508090 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:34 crc kubenswrapper[4871]: E1126 05:27:34.508195 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:34 crc kubenswrapper[4871]: E1126 05:27:34.508436 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:36 crc kubenswrapper[4871]: I1126 05:27:36.506634 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:36 crc kubenswrapper[4871]: I1126 05:27:36.506775 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:36 crc kubenswrapper[4871]: E1126 05:27:36.506862 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:36 crc kubenswrapper[4871]: I1126 05:27:36.506701 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:36 crc kubenswrapper[4871]: I1126 05:27:36.506718 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:36 crc kubenswrapper[4871]: E1126 05:27:36.506974 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:36 crc kubenswrapper[4871]: E1126 05:27:36.507085 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:36 crc kubenswrapper[4871]: E1126 05:27:36.507253 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:37 crc kubenswrapper[4871]: I1126 05:27:37.309744 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rpr6z_84290973-bc95-4326-bacd-7c210346620a/kube-multus/1.log" Nov 26 05:27:37 crc kubenswrapper[4871]: I1126 05:27:37.310848 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rpr6z_84290973-bc95-4326-bacd-7c210346620a/kube-multus/0.log" Nov 26 05:27:37 crc kubenswrapper[4871]: I1126 05:27:37.310920 4871 generic.go:334] "Generic (PLEG): container finished" podID="84290973-bc95-4326-bacd-7c210346620a" containerID="2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6" exitCode=1 Nov 26 05:27:37 crc kubenswrapper[4871]: I1126 05:27:37.310967 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rpr6z" event={"ID":"84290973-bc95-4326-bacd-7c210346620a","Type":"ContainerDied","Data":"2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6"} Nov 26 05:27:37 crc kubenswrapper[4871]: I1126 05:27:37.311014 4871 scope.go:117] "RemoveContainer" containerID="dce039c9ccb30625acfa0419f9487ef577b8f0c11af2b6c86598f764433837ce" Nov 26 05:27:37 crc kubenswrapper[4871]: I1126 05:27:37.311785 4871 scope.go:117] "RemoveContainer" containerID="2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6" Nov 26 05:27:37 crc kubenswrapper[4871]: E1126 05:27:37.312127 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-rpr6z_openshift-multus(84290973-bc95-4326-bacd-7c210346620a)\"" pod="openshift-multus/multus-rpr6z" podUID="84290973-bc95-4326-bacd-7c210346620a" Nov 26 05:27:37 crc kubenswrapper[4871]: I1126 05:27:37.340701 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-lvtc9" podStartSLOduration=95.340682724 podStartE2EDuration="1m35.340682724s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:32.31508207 +0000 UTC m=+110.498133706" watchObservedRunningTime="2025-11-26 05:27:37.340682724 +0000 UTC m=+115.523734310" Nov 26 05:27:38 crc kubenswrapper[4871]: I1126 05:27:38.316494 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rpr6z_84290973-bc95-4326-bacd-7c210346620a/kube-multus/1.log" Nov 26 05:27:38 crc kubenswrapper[4871]: I1126 05:27:38.506713 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:38 crc kubenswrapper[4871]: I1126 05:27:38.506777 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:38 crc kubenswrapper[4871]: E1126 05:27:38.506835 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:38 crc kubenswrapper[4871]: I1126 05:27:38.507048 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:38 crc kubenswrapper[4871]: E1126 05:27:38.507080 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:38 crc kubenswrapper[4871]: I1126 05:27:38.507094 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:38 crc kubenswrapper[4871]: E1126 05:27:38.507157 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:38 crc kubenswrapper[4871]: E1126 05:27:38.507503 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:40 crc kubenswrapper[4871]: I1126 05:27:40.506684 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:40 crc kubenswrapper[4871]: I1126 05:27:40.506797 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:40 crc kubenswrapper[4871]: I1126 05:27:40.506735 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:40 crc kubenswrapper[4871]: I1126 05:27:40.506711 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:40 crc kubenswrapper[4871]: E1126 05:27:40.506965 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:40 crc kubenswrapper[4871]: E1126 05:27:40.507067 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:40 crc kubenswrapper[4871]: E1126 05:27:40.507176 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:40 crc kubenswrapper[4871]: E1126 05:27:40.507328 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:41 crc kubenswrapper[4871]: I1126 05:27:41.508306 4871 scope.go:117] "RemoveContainer" containerID="165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef" Nov 26 05:27:41 crc kubenswrapper[4871]: E1126 05:27:41.508780 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qzw7d_openshift-ovn-kubernetes(6a0aba42-7edc-4d81-850e-3e3439eeaec8)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" Nov 26 05:27:42 crc kubenswrapper[4871]: E1126 05:27:42.451071 4871 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 26 05:27:42 crc kubenswrapper[4871]: I1126 05:27:42.506456 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:42 crc kubenswrapper[4871]: I1126 05:27:42.506605 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:42 crc kubenswrapper[4871]: E1126 05:27:42.506682 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:42 crc kubenswrapper[4871]: E1126 05:27:42.506841 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:42 crc kubenswrapper[4871]: I1126 05:27:42.506866 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:42 crc kubenswrapper[4871]: I1126 05:27:42.506984 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:42 crc kubenswrapper[4871]: E1126 05:27:42.509167 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:42 crc kubenswrapper[4871]: E1126 05:27:42.509359 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:42 crc kubenswrapper[4871]: E1126 05:27:42.623491 4871 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 05:27:44 crc kubenswrapper[4871]: I1126 05:27:44.506688 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:44 crc kubenswrapper[4871]: I1126 05:27:44.506706 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:44 crc kubenswrapper[4871]: E1126 05:27:44.507288 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:44 crc kubenswrapper[4871]: I1126 05:27:44.506843 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:44 crc kubenswrapper[4871]: I1126 05:27:44.506813 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:44 crc kubenswrapper[4871]: E1126 05:27:44.507457 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:44 crc kubenswrapper[4871]: E1126 05:27:44.507675 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:44 crc kubenswrapper[4871]: E1126 05:27:44.507832 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:46 crc kubenswrapper[4871]: I1126 05:27:46.507184 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:46 crc kubenswrapper[4871]: I1126 05:27:46.507263 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:46 crc kubenswrapper[4871]: I1126 05:27:46.507268 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:46 crc kubenswrapper[4871]: I1126 05:27:46.507184 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:46 crc kubenswrapper[4871]: E1126 05:27:46.507445 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:46 crc kubenswrapper[4871]: E1126 05:27:46.507587 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:46 crc kubenswrapper[4871]: E1126 05:27:46.507731 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:46 crc kubenswrapper[4871]: E1126 05:27:46.507849 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:47 crc kubenswrapper[4871]: E1126 05:27:47.625695 4871 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 05:27:48 crc kubenswrapper[4871]: I1126 05:27:48.507397 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:48 crc kubenswrapper[4871]: I1126 05:27:48.507497 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:48 crc kubenswrapper[4871]: I1126 05:27:48.507499 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:48 crc kubenswrapper[4871]: I1126 05:27:48.507585 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:48 crc kubenswrapper[4871]: E1126 05:27:48.507812 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:48 crc kubenswrapper[4871]: E1126 05:27:48.508008 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:48 crc kubenswrapper[4871]: E1126 05:27:48.508076 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:48 crc kubenswrapper[4871]: E1126 05:27:48.508174 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:48 crc kubenswrapper[4871]: I1126 05:27:48.509430 4871 scope.go:117] "RemoveContainer" containerID="2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6" Nov 26 05:27:49 crc kubenswrapper[4871]: I1126 05:27:49.361379 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rpr6z_84290973-bc95-4326-bacd-7c210346620a/kube-multus/1.log" Nov 26 05:27:49 crc kubenswrapper[4871]: I1126 05:27:49.361458 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rpr6z" event={"ID":"84290973-bc95-4326-bacd-7c210346620a","Type":"ContainerStarted","Data":"417bc65daf58d30ae61be4b4c6e5b7a604e2c0b9c899b8c31c9d9fe1276ba648"} Nov 26 05:27:50 crc kubenswrapper[4871]: I1126 05:27:50.507193 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:50 crc kubenswrapper[4871]: I1126 05:27:50.507260 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:50 crc kubenswrapper[4871]: E1126 05:27:50.507361 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:50 crc kubenswrapper[4871]: I1126 05:27:50.507372 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:50 crc kubenswrapper[4871]: I1126 05:27:50.507403 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:50 crc kubenswrapper[4871]: E1126 05:27:50.507464 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:50 crc kubenswrapper[4871]: E1126 05:27:50.507695 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:50 crc kubenswrapper[4871]: E1126 05:27:50.507915 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:52 crc kubenswrapper[4871]: I1126 05:27:52.507238 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:27:52 crc kubenswrapper[4871]: I1126 05:27:52.507370 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:27:52 crc kubenswrapper[4871]: I1126 05:27:52.508574 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:27:52 crc kubenswrapper[4871]: I1126 05:27:52.508840 4871 scope.go:117] "RemoveContainer" containerID="165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef" Nov 26 05:27:52 crc kubenswrapper[4871]: E1126 05:27:52.508873 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 05:27:52 crc kubenswrapper[4871]: E1126 05:27:52.509157 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 05:27:52 crc kubenswrapper[4871]: I1126 05:27:52.508741 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:27:52 crc kubenswrapper[4871]: E1126 05:27:52.510073 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 05:27:52 crc kubenswrapper[4871]: E1126 05:27:52.509825 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847" Nov 26 05:27:52 crc kubenswrapper[4871]: E1126 05:27:52.626210 4871 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 26 05:27:53 crc kubenswrapper[4871]: I1126 05:27:53.378272 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/3.log"
Nov 26 05:27:53 crc kubenswrapper[4871]: I1126 05:27:53.388192 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerStarted","Data":"2728c981c0552e6cd2d6812b668022b9e869813f5e36a80a43b5b3070b2872f2"}
Nov 26 05:27:53 crc kubenswrapper[4871]: I1126 05:27:53.388959 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:27:53 crc kubenswrapper[4871]: I1126 05:27:53.398077 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-z2d5h"]
Nov 26 05:27:53 crc kubenswrapper[4871]: I1126 05:27:53.398168 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:27:53 crc kubenswrapper[4871]: E1126 05:27:53.398252 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:27:53 crc kubenswrapper[4871]: I1126 05:27:53.425756 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podStartSLOduration=111.425724948 podStartE2EDuration="1m51.425724948s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:27:53.425236016 +0000 UTC m=+131.608287612" watchObservedRunningTime="2025-11-26 05:27:53.425724948 +0000 UTC m=+131.608776574"
Nov 26 05:27:54 crc kubenswrapper[4871]: I1126 05:27:54.506777 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:27:54 crc kubenswrapper[4871]: I1126 05:27:54.506844 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:27:54 crc kubenswrapper[4871]: E1126 05:27:54.507058 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:27:54 crc kubenswrapper[4871]: I1126 05:27:54.507108 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:27:54 crc kubenswrapper[4871]: E1126 05:27:54.507259 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:27:54 crc kubenswrapper[4871]: E1126 05:27:54.507424 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:27:55 crc kubenswrapper[4871]: I1126 05:27:55.507041 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:27:55 crc kubenswrapper[4871]: E1126 05:27:55.507288 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:27:56 crc kubenswrapper[4871]: I1126 05:27:56.506745 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:27:56 crc kubenswrapper[4871]: I1126 05:27:56.506839 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:27:56 crc kubenswrapper[4871]: I1126 05:27:56.506874 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:27:56 crc kubenswrapper[4871]: E1126 05:27:56.506957 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 26 05:27:56 crc kubenswrapper[4871]: E1126 05:27:56.507066 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 26 05:27:56 crc kubenswrapper[4871]: E1126 05:27:56.507169 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 26 05:27:57 crc kubenswrapper[4871]: I1126 05:27:57.506776 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:27:57 crc kubenswrapper[4871]: E1126 05:27:57.507464 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-z2d5h" podUID="30b3c82b-ca2a-4821-86e0-94aa2afce847"
Nov 26 05:27:58 crc kubenswrapper[4871]: I1126 05:27:58.507303 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:27:58 crc kubenswrapper[4871]: I1126 05:27:58.507374 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 26 05:27:58 crc kubenswrapper[4871]: I1126 05:27:58.507423 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:27:58 crc kubenswrapper[4871]: I1126 05:27:58.511043 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 26 05:27:58 crc kubenswrapper[4871]: I1126 05:27:58.511057 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 26 05:27:58 crc kubenswrapper[4871]: I1126 05:27:58.511191 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Nov 26 05:27:58 crc kubenswrapper[4871]: I1126 05:27:58.512052 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 26 05:27:59 crc kubenswrapper[4871]: I1126 05:27:59.506395 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h"
Nov 26 05:27:59 crc kubenswrapper[4871]: I1126 05:27:59.509513 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Nov 26 05:27:59 crc kubenswrapper[4871]: I1126 05:27:59.509574 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.131659 4871 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.178942 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ql4w4"]
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.180141 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.181325 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp"]
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.182083 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.184156 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24p5x"]
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.184656 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.186173 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv"]
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.186417 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.186668 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.186912 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.187120 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.187346 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.192315 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.192884 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-h5qx5"]
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.193724 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-h5qx5"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.195493 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-ml4vf"]
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.196208 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-ml4vf"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.198154 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt"]
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.198841 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.212135 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.218516 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.222467 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6"]
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.223079 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.224202 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.224347 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.227916 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.231431 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pz8qb"]
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.231987 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.238398 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.244077 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.262600 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.263008 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.263149 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.263088 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.264140 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.265168 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-trusted-ca-bundle\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.265249 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-service-ca\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.265344 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/6bece945-b45b-4d5d-aa90-23400b5267d3-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6zcbt\" (UID: \"6bece945-b45b-4d5d-aa90-23400b5267d3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.265426 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-serving-cert\") pod \"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.265536 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-trusted-ca\") pod \"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.265633 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-config\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.265724 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-config\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.265788 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.265851 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2225428-c79d-4406-9238-432797b4fa99-serving-cert\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.265990 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8v9k\" (UniqueName: \"kubernetes.io/projected/5b7f9021-d531-4dda-89c5-57eff16b24ec-kube-api-access-s8v9k\") pod \"cluster-samples-operator-665b6dd947-zb8wp\" (UID: \"5b7f9021-d531-4dda-89c5-57eff16b24ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.266130 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-etcd-client\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.266302 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-serving-cert\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.266550 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-audit-dir\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.266701 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/5b7f9021-d531-4dda-89c5-57eff16b24ec-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-zb8wp\" (UID: \"5b7f9021-d531-4dda-89c5-57eff16b24ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.266765 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-image-import-ca\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.266950 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-oauth-config\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.267053 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-oauth-serving-cert\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.267102 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-node-pullsecrets\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.267219 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.267251 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-config\") pod \"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.267281 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-audit\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.267456 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.267541 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-serving-cert\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.278457 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6bece945-b45b-4d5d-aa90-23400b5267d3-serving-cert\") pod \"openshift-config-operator-7777fb866f-6zcbt\" (UID: \"6bece945-b45b-4d5d-aa90-23400b5267d3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.279322 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.279360 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.279424 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jhl8\" (UniqueName: \"kubernetes.io/projected/6bece945-b45b-4d5d-aa90-23400b5267d3-kube-api-access-8jhl8\") pod \"openshift-config-operator-7777fb866f-6zcbt\" (UID: \"6bece945-b45b-4d5d-aa90-23400b5267d3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.279635 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.279761 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.279993 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.280263 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.280422 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.280596 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-jgrtb"]
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.279631 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.280864 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-config\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") "
pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.280903 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.280931 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmq95\" (UniqueName: \"kubernetes.io/projected/65ed678d-1457-46e2-a59d-1b05e7bbee8c-kube-api-access-dmq95\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.280958 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-etcd-serving-ca\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.280979 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-encryption-config\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281001 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-client-ca\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281031 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281042 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281104 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsls8\" (UniqueName: \"kubernetes.io/projected/d2225428-c79d-4406-9238-432797b4fa99-kube-api-access-tsls8\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281111 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281137 4871 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rc29k\" (UniqueName: \"kubernetes.io/projected/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-kube-api-access-rc29k\") pod \"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281163 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm26j\" (UniqueName: \"kubernetes.io/projected/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-kube-api-access-vm26j\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281174 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281191 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rq7fb\" (UniqueName: \"kubernetes.io/projected/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-kube-api-access-rq7fb\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281248 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281315 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281325 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281441 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281468 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281517 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281247 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281651 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281627 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.281679 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.282040 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: 
I1126 05:28:01.282378 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.282453 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.282803 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.282905 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.282929 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.283052 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.283153 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.283252 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.283333 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.283398 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.283446 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.283537 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.283407 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.283543 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.283831 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.286154 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.286358 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.286479 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-rdvkb"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.286654 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: 
I1126 05:28:01.287804 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.288002 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.288347 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.288451 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-rdvkb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.288885 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.289678 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.290098 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.290706 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tcqk7"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.291489 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.299314 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.299377 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.299569 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.299990 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.306718 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.308043 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.308703 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.309079 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.309227 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.310212 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.310379 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.310380 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.310648 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.311120 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.318711 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-sdxzx"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.319142 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.319430 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.321327 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.321544 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.321597 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.321723 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.321882 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.322012 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.322040 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.322210 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.322245 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.322303 4871 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.322374 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.322465 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.322566 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.322699 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.322818 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.322927 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.323037 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.323143 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.323249 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.323354 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.323757 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.323941 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.324229 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.327256 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.327999 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-lb2sb"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.328024 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.333637 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.338172 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.339705 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.339955 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.351955 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.352619 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.353322 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.368012 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.368921 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.369715 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.370387 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.371019 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.371234 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9sq6t"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.371699 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.372052 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.372658 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.372783 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.373085 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.373267 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wg5vb"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.373963 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.376617 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ff9xx"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.377091 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.377190 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.377618 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.377899 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.378575 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.378586 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.379046 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.380139 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.381024 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.381541 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.381737 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382245 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsls8\" (UniqueName: \"kubernetes.io/projected/d2225428-c79d-4406-9238-432797b4fa99-kube-api-access-tsls8\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382275 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-669w4\" (UniqueName: \"kubernetes.io/projected/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-kube-api-access-669w4\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382293 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rc29k\" (UniqueName: \"kubernetes.io/projected/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-kube-api-access-rc29k\") pod \"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382311 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382337 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm26j\" (UniqueName: \"kubernetes.io/projected/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-kube-api-access-vm26j\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382353 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rq7fb\" (UniqueName: \"kubernetes.io/projected/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-kube-api-access-rq7fb\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382368 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382386 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbtkx\" (UniqueName: \"kubernetes.io/projected/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-kube-api-access-hbtkx\") pod 
\"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382402 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-trusted-ca-bundle\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382418 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-policies\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382434 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-config\") pod \"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382450 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-service-ca\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382465 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-dir\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382480 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/6bece945-b45b-4d5d-aa90-23400b5267d3-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6zcbt\" (UID: \"6bece945-b45b-4d5d-aa90-23400b5267d3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382495 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-serving-cert\") pod \"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382508 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-trusted-ca\") pod \"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 
05:28:01.382539 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-config\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382555 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-config\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382569 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382584 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8v9k\" (UniqueName: \"kubernetes.io/projected/5b7f9021-d531-4dda-89c5-57eff16b24ec-kube-api-access-s8v9k\") pod \"cluster-samples-operator-665b6dd947-zb8wp\" (UID: \"5b7f9021-d531-4dda-89c5-57eff16b24ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382599 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2225428-c79d-4406-9238-432797b4fa99-serving-cert\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382616 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382639 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-etcd-client\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382654 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-serving-cert\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382671 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/9e37bbf9-7c3e-431d-a8af-dd7ca13730e5-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zhpbn\" (UID: \"9e37bbf9-7c3e-431d-a8af-dd7ca13730e5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382685 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e37bbf9-7c3e-431d-a8af-dd7ca13730e5-config\") pod \"kube-controller-manager-operator-78b949d7b-zhpbn\" (UID: \"9e37bbf9-7c3e-431d-a8af-dd7ca13730e5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382703 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-audit-dir\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382718 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/5b7f9021-d531-4dda-89c5-57eff16b24ec-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-zb8wp\" (UID: \"5b7f9021-d531-4dda-89c5-57eff16b24ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382736 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382750 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382767 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-image-import-ca\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382784 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-client-ca\") pod \"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382800 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382817 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382832 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9e37bbf9-7c3e-431d-a8af-dd7ca13730e5-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zhpbn\" (UID: \"9e37bbf9-7c3e-431d-a8af-dd7ca13730e5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382847 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382877 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-oauth-config\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382893 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-oauth-serving-cert\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382916 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-node-pullsecrets\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382930 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382947 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-config\") pod 
\"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382962 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-serving-cert\") pod \"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382978 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-audit\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.382993 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383008 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-serving-cert\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383026 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6bece945-b45b-4d5d-aa90-23400b5267d3-serving-cert\") pod \"openshift-config-operator-7777fb866f-6zcbt\" (UID: \"6bece945-b45b-4d5d-aa90-23400b5267d3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383041 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jhl8\" (UniqueName: \"kubernetes.io/projected/6bece945-b45b-4d5d-aa90-23400b5267d3-kube-api-access-8jhl8\") pod \"openshift-config-operator-7777fb866f-6zcbt\" (UID: \"6bece945-b45b-4d5d-aa90-23400b5267d3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383057 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383072 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-config\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " 
pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383088 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383103 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmq95\" (UniqueName: \"kubernetes.io/projected/65ed678d-1457-46e2-a59d-1b05e7bbee8c-kube-api-access-dmq95\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383118 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383136 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-etcd-serving-ca\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383150 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-encryption-config\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383167 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-client-ca\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383186 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.383207 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.384282 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-service-ca\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.384742 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/6bece945-b45b-4d5d-aa90-23400b5267d3-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6zcbt\" (UID: \"6bece945-b45b-4d5d-aa90-23400b5267d3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.386024 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-oauth-serving-cert\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.386083 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-node-pullsecrets\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.386466 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-trusted-ca\") pod \"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.387000 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-config\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.391069 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-config\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.391494 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-audit\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.391780 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-config\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.391911 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-audit-dir\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.392714 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-config\") pod \"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.392952 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-image-import-ca\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.393503 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-client-ca\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.393684 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-etcd-serving-ca\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.394973 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-etcd-client\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.395482 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-serving-cert\") pod \"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.395653 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2225428-c79d-4406-9238-432797b4fa99-serving-cert\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.395764 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.396227 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-oauth-config\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc 
kubenswrapper[4871]: I1126 05:28:01.396472 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-encryption-config\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.396537 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.397894 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6bece945-b45b-4d5d-aa90-23400b5267d3-serving-cert\") pod \"openshift-config-operator-7777fb866f-6zcbt\" (UID: \"6bece945-b45b-4d5d-aa90-23400b5267d3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.398049 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.398658 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24p5x"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.399292 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.400299 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-f7vqf"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.400315 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.401026 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.401614 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.400421 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.415421 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-serving-cert\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.415828 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.417012 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-serving-cert\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.417536 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/5b7f9021-d531-4dda-89c5-57eff16b24ec-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-zb8wp\" (UID: \"5b7f9021-d531-4dda-89c5-57eff16b24ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.418393 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.418660 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.422735 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.423400 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-trusted-ca-bundle\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:01 crc kubenswrapper[4871]: 
I1126 05:28:01.424075 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-cbq2p"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.424939 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.425556 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.434685 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.434849 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.435353 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.436006 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.436068 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.436478 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.436629 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.437023 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.437240 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ql4w4"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.438785 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.439930 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.440114 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-rdvkb"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.440349 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.441030 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pz8qb"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.443882 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.443913 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-h5qx5"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.444285 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.445198 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.446110 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.449204 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.450858 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tcqk7"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.452232 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.454941 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.455025 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.456122 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.458382 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-nntmb"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.459322 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.459554 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9sq6t"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.460481 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.461693 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.462322 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.463304 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-sdxzx"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.464488 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wg5vb"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.465502 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-jgrtb"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.466499 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-ml4vf"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.467959 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.468419 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.469641 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-cbq2p"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.470548 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ff9xx"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.471715 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.472710 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.474411 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-rcft8"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.475015 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rcft8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.475559 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-6knjf"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.477053 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.477140 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-nntmb"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.478327 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.480030 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rcft8"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.481605 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.481631 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.482468 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-f7vqf"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.483671 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484197 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42-proxy-tls\") pod \"machine-config-controller-84d6567774-qx4hc\" (UID: \"7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484314 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484428 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/140ee3ed-e8e4-42c9-b520-8be36158fd66-bound-sa-token\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484571 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njmnj\" (UniqueName: \"kubernetes.io/projected/eb62a12d-ae78-45e2-b32d-126d3643260d-kube-api-access-njmnj\") pod \"openshift-controller-manager-operator-756b6f6bc6-n27d9\" (UID: \"eb62a12d-ae78-45e2-b32d-126d3643260d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484601 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba736a92-4399-4f3b-bcc9-fa7a6b30f953-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-h6n6d\" (UID: 
\"ba736a92-4399-4f3b-bcc9-fa7a6b30f953\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484625 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e37bbf9-7c3e-431d-a8af-dd7ca13730e5-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zhpbn\" (UID: \"9e37bbf9-7c3e-431d-a8af-dd7ca13730e5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484643 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e37bbf9-7c3e-431d-a8af-dd7ca13730e5-config\") pod \"kube-controller-manager-operator-78b949d7b-zhpbn\" (UID: \"9e37bbf9-7c3e-431d-a8af-dd7ca13730e5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484670 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484688 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-tls\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484708 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-client-ca\") pod \"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484725 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484744 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fdrl\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-kube-api-access-9fdrl\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484762 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54870e81-ef64-4e07-9190-1bffb7e6db6c-serving-cert\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: 
\"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484783 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484805 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484831 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9e37bbf9-7c3e-431d-a8af-dd7ca13730e5-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zhpbn\" (UID: \"9e37bbf9-7c3e-431d-a8af-dd7ca13730e5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484845 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484879 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8hzs\" (UniqueName: \"kubernetes.io/projected/317fac77-edf3-46a5-9635-1dd8bb83fea6-kube-api-access-z8hzs\") pod \"downloads-7954f5f757-rdvkb\" (UID: \"317fac77-edf3-46a5-9635-1dd8bb83fea6\") " pod="openshift-console/downloads-7954f5f757-rdvkb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484896 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb62a12d-ae78-45e2-b32d-126d3643260d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-n27d9\" (UID: \"eb62a12d-ae78-45e2-b32d-126d3643260d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484911 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da210515-1701-4ea1-ab3c-4407b119277e-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vwk2r\" (UID: \"da210515-1701-4ea1-ab3c-4407b119277e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484927 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8pwb\" (UniqueName: 
\"kubernetes.io/projected/54870e81-ef64-4e07-9190-1bffb7e6db6c-kube-api-access-z8pwb\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484970 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-installation-pull-secrets\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.484989 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-serving-cert\") pod \"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485029 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/140ee3ed-e8e4-42c9-b520-8be36158fd66-trusted-ca\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485044 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ba736a92-4399-4f3b-bcc9-fa7a6b30f953-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-h6n6d\" (UID: \"ba736a92-4399-4f3b-bcc9-fa7a6b30f953\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485058 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54870e81-ef64-4e07-9190-1bffb7e6db6c-config\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485074 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l24c\" (UniqueName: \"kubernetes.io/projected/140ee3ed-e8e4-42c9-b520-8be36158fd66-kube-api-access-6l24c\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485089 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da210515-1701-4ea1-ab3c-4407b119277e-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vwk2r\" (UID: \"da210515-1701-4ea1-ab3c-4407b119277e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485105 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-bound-sa-token\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485126 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485143 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485164 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54870e81-ef64-4e07-9190-1bffb7e6db6c-service-ca-bundle\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485182 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485202 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485236 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-669w4\" (UniqueName: \"kubernetes.io/projected/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-kube-api-access-669w4\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485281 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da210515-1701-4ea1-ab3c-4407b119277e-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vwk2r\" (UID: \"da210515-1701-4ea1-ab3c-4407b119277e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485302 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485316 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/140ee3ed-e8e4-42c9-b520-8be36158fd66-metrics-tls\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485338 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485555 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbtkx\" (UniqueName: \"kubernetes.io/projected/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-kube-api-access-hbtkx\") pod \"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485575 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54870e81-ef64-4e07-9190-1bffb7e6db6c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485590 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba736a92-4399-4f3b-bcc9-fa7a6b30f953-config\") pod \"kube-apiserver-operator-766d6c64bb-h6n6d\" (UID: \"ba736a92-4399-4f3b-bcc9-fa7a6b30f953\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485611 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485621 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-policies\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485792 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-config\") pod \"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485820 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb62a12d-ae78-45e2-b32d-126d3643260d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-n27d9\" (UID: \"eb62a12d-ae78-45e2-b32d-126d3643260d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485844 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-dir\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485875 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-certificates\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485891 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-qx4hc\" (UID: \"7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485914 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwk27\" (UniqueName: \"kubernetes.io/projected/7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42-kube-api-access-dwk27\") pod \"machine-config-controller-84d6567774-qx4hc\" (UID: \"7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485932 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-trusted-ca\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485959 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-ca-trust-extracted\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.486052 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" 
(UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-policies\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.485956 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-client-ca\") pod \"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.486905 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.486931 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-dir\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.486969 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-config\") pod \"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.487237 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: E1126 05:28:01.487772 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:01.987757302 +0000 UTC m=+140.170808888 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.488309 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.488875 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.489178 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.489177 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e37bbf9-7c3e-431d-a8af-dd7ca13730e5-config\") pod \"kube-controller-manager-operator-78b949d7b-zhpbn\" (UID: \"9e37bbf9-7c3e-431d-a8af-dd7ca13730e5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.490813 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.491133 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.491165 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.491670 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-ocp-branding-template\") pod 
\"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.492294 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e37bbf9-7c3e-431d-a8af-dd7ca13730e5-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zhpbn\" (UID: \"9e37bbf9-7c3e-431d-a8af-dd7ca13730e5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.492373 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.497791 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.498506 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-serving-cert\") pod \"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.502492 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.502643 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.503769 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.504789 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-6knjf"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.506109 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.507115 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-7n9dd"] Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.507797 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.521918 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.541701 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.561927 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.582886 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.586304 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.586491 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54870e81-ef64-4e07-9190-1bffb7e6db6c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: E1126 05:28:01.586517 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.0864862 +0000 UTC m=+140.269537816 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.586595 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba736a92-4399-4f3b-bcc9-fa7a6b30f953-config\") pod \"kube-apiserver-operator-766d6c64bb-h6n6d\" (UID: \"ba736a92-4399-4f3b-bcc9-fa7a6b30f953\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.586710 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-key\") pod \"service-ca-9c57cc56f-cbq2p\" (UID: \"ff2fc558-90c0-467d-b6c0-b395c9b26998\") " pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.586753 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-cabundle\") pod \"service-ca-9c57cc56f-cbq2p\" (UID: \"ff2fc558-90c0-467d-b6c0-b395c9b26998\") " pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.586795 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb62a12d-ae78-45e2-b32d-126d3643260d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-n27d9\" (UID: \"eb62a12d-ae78-45e2-b32d-126d3643260d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.586851 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-certificates\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.586998 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-config\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587037 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ff9xx\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587072 4871 
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587133 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/140ee3ed-e8e4-42c9-b520-8be36158fd66-bound-sa-token\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587188 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99e7e324-afae-4256-915b-325038c897e4-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-kjfwt\" (UID: \"99e7e324-afae-4256-915b-325038c897e4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587221 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dff5628c-810a-4f12-a683-341ebc57530a-etcd-client\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587255 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wfvk\" (UniqueName: \"kubernetes.io/projected/244d9419-a1ed-45ac-9aca-9291a40ed9b3-kube-api-access-5wfvk\") pod \"dns-default-nntmb\" (UID: \"244d9419-a1ed-45ac-9aca-9291a40ed9b3\") " pod="openshift-dns/dns-default-nntmb"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587289 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/5b6bd3c0-7f03-41d8-bc87-66c374966c21-machine-approver-tls\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587325 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fdrl\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-kube-api-access-9fdrl\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587335 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54870e81-ef64-4e07-9190-1bffb7e6db6c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587361 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5245134d-509e-4548-af72-7c1da043b3f4-serving-cert\") pod \"service-ca-operator-777779d784-sb6f7\" (UID: \"5245134d-509e-4548-af72-7c1da043b3f4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587392 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5245134d-509e-4548-af72-7c1da043b3f4-config\") pod \"service-ca-operator-777779d784-sb6f7\" (UID: \"5245134d-509e-4548-af72-7c1da043b3f4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587429 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-serving-cert\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587445 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba736a92-4399-4f3b-bcc9-fa7a6b30f953-config\") pod \"kube-apiserver-operator-766d6c64bb-h6n6d\" (UID: \"ba736a92-4399-4f3b-bcc9-fa7a6b30f953\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587460 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwn4k\" (UniqueName: \"kubernetes.io/projected/cb3f5110-df96-4946-b0a5-3439ab4e1724-kube-api-access-kwn4k\") pod \"collect-profiles-29402235-s44z5\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587495 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtbjd\" (UniqueName: \"kubernetes.io/projected/dd4302fa-1a28-4718-b14c-f85e45519916-kube-api-access-qtbjd\") pod \"control-plane-machine-set-operator-78cbb6b69f-2pb6b\" (UID: \"dd4302fa-1a28-4718-b14c-f85e45519916\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587567 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8hzs\" (UniqueName: \"kubernetes.io/projected/317fac77-edf3-46a5-9635-1dd8bb83fea6-kube-api-access-z8hzs\") pod \"downloads-7954f5f757-rdvkb\" (UID: \"317fac77-edf3-46a5-9635-1dd8bb83fea6\") " pod="openshift-console/downloads-7954f5f757-rdvkb"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.587601 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb62a12d-ae78-45e2-b32d-126d3643260d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-n27d9\" (UID: \"eb62a12d-ae78-45e2-b32d-126d3643260d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588507 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/704ebe80-008e-4369-8003-6d264aa6f6dc-audit-dir\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588607 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/66f940f0-55f0-4d12-8376-a997d3c802cd-metrics-tls\") pod \"dns-operator-744455d44c-9sq6t\" (UID: \"66f940f0-55f0-4d12-8376-a997d3c802cd\") " pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588650 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9swz\" (UniqueName: \"kubernetes.io/projected/ff2fc558-90c0-467d-b6c0-b395c9b26998-kube-api-access-d9swz\") pod \"service-ca-9c57cc56f-cbq2p\" (UID: \"ff2fc558-90c0-467d-b6c0-b395c9b26998\") " pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588699 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-installation-pull-secrets\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588703 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb62a12d-ae78-45e2-b32d-126d3643260d-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-n27d9\" (UID: \"eb62a12d-ae78-45e2-b32d-126d3643260d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588748 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-csi-data-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588779 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/244d9419-a1ed-45ac-9aca-9291a40ed9b3-config-volume\") pod \"dns-default-nntmb\" (UID: \"244d9419-a1ed-45ac-9aca-9291a40ed9b3\") " pod="openshift-dns/dns-default-nntmb"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588812 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ba736a92-4399-4f3b-bcc9-fa7a6b30f953-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-h6n6d\" (UID: \"ba736a92-4399-4f3b-bcc9-fa7a6b30f953\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588834 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-plugins-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf"
pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588857 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0edbface-b4a5-4b10-bb00-a7650f2a2b77-proxy-tls\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588879 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4b702c-363c-48e1-aac0-8816682160a6-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-f7vqf\" (UID: \"9d4b702c-363c-48e1-aac0-8816682160a6\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588902 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54870e81-ef64-4e07-9190-1bffb7e6db6c-config\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588925 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l24c\" (UniqueName: \"kubernetes.io/projected/140ee3ed-e8e4-42c9-b520-8be36158fd66-kube-api-access-6l24c\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588949 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da210515-1701-4ea1-ab3c-4407b119277e-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vwk2r\" (UID: \"da210515-1701-4ea1-ab3c-4407b119277e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588971 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/704ebe80-008e-4369-8003-6d264aa6f6dc-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.588996 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/966673ca-eeee-4bc2-84c2-805d4f8f9648-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-t58x9\" (UID: \"966673ca-eeee-4bc2-84c2-805d4f8f9648\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589019 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54870e81-ef64-4e07-9190-1bffb7e6db6c-service-ca-bundle\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589040 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/dff5628c-810a-4f12-a683-341ebc57530a-etcd-service-ca\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589061 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knzph\" (UniqueName: \"kubernetes.io/projected/99e7e324-afae-4256-915b-325038c897e4-kube-api-access-knzph\") pod \"openshift-apiserver-operator-796bbdcf4f-kjfwt\" (UID: \"99e7e324-afae-4256-915b-325038c897e4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589084 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5b6bd3c0-7f03-41d8-bc87-66c374966c21-auth-proxy-config\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589105 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktn5v\" (UniqueName: \"kubernetes.io/projected/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-kube-api-access-ktn5v\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589124 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/559c6fab-fdbb-495f-933a-90a3957ec82c-srv-cert\") pod \"olm-operator-6b444d44fb-wmtpt\" (UID: \"559c6fab-fdbb-495f-933a-90a3957ec82c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589147 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-tmpfs\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589186 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da210515-1701-4ea1-ab3c-4407b119277e-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vwk2r\" (UID: \"da210515-1701-4ea1-ab3c-4407b119277e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589209 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxw5m\" (UniqueName: \"kubernetes.io/projected/bfda4547-4814-4d32-ba43-b3ffc061bf81-kube-api-access-hxw5m\") pod \"migrator-59844c95c7-b8k6z\" (UID: \"bfda4547-4814-4d32-ba43-b3ffc061bf81\") " 
pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589231 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-certs\") pod \"machine-config-server-7n9dd\" (UID: \"9c115df2-f210-4ff8-a6d8-ffe9d04e739c\") " pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589252 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-apiservice-cert\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589275 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ddxm\" (UniqueName: \"kubernetes.io/projected/851b2861-b400-41ae-9aae-8e041dc4e85a-kube-api-access-5ddxm\") pod \"ingress-canary-rcft8\" (UID: \"851b2861-b400-41ae-9aae-8e041dc4e85a\") " pod="openshift-ingress-canary/ingress-canary-rcft8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589389 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-mountpoint-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589552 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqqwt\" (UniqueName: \"kubernetes.io/projected/680ec585-d304-48b0-9501-7af7e5bc503b-kube-api-access-cqqwt\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589662 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79sbk\" (UniqueName: \"kubernetes.io/projected/0edbface-b4a5-4b10-bb00-a7650f2a2b77-kube-api-access-79sbk\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589716 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-registration-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589737 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-webhook-cert\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589758 
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589785 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwk27\" (UniqueName: \"kubernetes.io/projected/7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42-kube-api-access-dwk27\") pod \"machine-config-controller-84d6567774-qx4hc\" (UID: \"7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589875 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a54f432f-761c-419d-9c57-654e4f81a28f-metrics-certs\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589905 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dff5628c-810a-4f12-a683-341ebc57530a-config\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589932 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-qx4hc\" (UID: \"7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589955 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0edbface-b4a5-4b10-bb00-a7650f2a2b77-images\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.589988 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp6mq\" (UniqueName: \"kubernetes.io/projected/5245134d-509e-4548-af72-7c1da043b3f4-kube-api-access-gp6mq\") pod \"service-ca-operator-777779d784-sb6f7\" (UID: \"5245134d-509e-4548-af72-7c1da043b3f4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.590327 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42-proxy-tls\") pod \"machine-config-controller-84d6567774-qx4hc\" (UID: \"7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.590653 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-trusted-ca\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.590689 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da210515-1701-4ea1-ab3c-4407b119277e-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vwk2r\" (UID: \"da210515-1701-4ea1-ab3c-4407b119277e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.590708 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-node-bootstrap-token\") pod \"machine-config-server-7n9dd\" (UID: \"9c115df2-f210-4ff8-a6d8-ffe9d04e739c\") " pod="openshift-machine-config-operator/machine-config-server-7n9dd"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.590735 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-ca-trust-extracted\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.590772 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a2e2ff1f-e326-4743-8c32-e69f868ef14c-srv-cert\") pod \"catalog-operator-68c6474976-jj8jj\" (UID: \"a2e2ff1f-e326-4743-8c32-e69f868ef14c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.590796 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/559c6fab-fdbb-495f-933a-90a3957ec82c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-wmtpt\" (UID: \"559c6fab-fdbb-495f-933a-90a3957ec82c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591065 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54870e81-ef64-4e07-9190-1bffb7e6db6c-config\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591186 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb3f5110-df96-4946-b0a5-3439ab4e1724-config-volume\") pod \"collect-profiles-29402235-s44z5\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591234 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p887w\" (UniqueName: \"kubernetes.io/projected/66f940f0-55f0-4d12-8376-a997d3c802cd-kube-api-access-p887w\") pod \"dns-operator-744455d44c-9sq6t\" (UID: \"66f940f0-55f0-4d12-8376-a997d3c802cd\") " pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591254 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-qx4hc\" (UID: \"7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591330 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb62a12d-ae78-45e2-b32d-126d3643260d-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-n27d9\" (UID: \"eb62a12d-ae78-45e2-b32d-126d3643260d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591348 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njmnj\" (UniqueName: \"kubernetes.io/projected/eb62a12d-ae78-45e2-b32d-126d3643260d-kube-api-access-njmnj\") pod \"openshift-controller-manager-operator-756b6f6bc6-n27d9\" (UID: \"eb62a12d-ae78-45e2-b32d-126d3643260d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591476 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba736a92-4399-4f3b-bcc9-fa7a6b30f953-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-h6n6d\" (UID: \"ba736a92-4399-4f3b-bcc9-fa7a6b30f953\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591559 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/704ebe80-008e-4369-8003-6d264aa6f6dc-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591630 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a2e2ff1f-e326-4743-8c32-e69f868ef14c-profile-collector-cert\") pod \"catalog-operator-68c6474976-jj8jj\" (UID: \"a2e2ff1f-e326-4743-8c32-e69f868ef14c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591676 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxqhg\" (UniqueName: \"kubernetes.io/projected/0cfdd82f-63f7-4804-af6e-7ec8282bbc92-kube-api-access-lxqhg\") pod \"package-server-manager-789f6589d5-rwdp7\" (UID: \"0cfdd82f-63f7-4804-af6e-7ec8282bbc92\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591713 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2db4\" (UniqueName: \"kubernetes.io/projected/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-kube-api-access-j2db4\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb"
\"kubernetes.io/projected/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-kube-api-access-j2db4\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591775 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-tls\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591821 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54870e81-ef64-4e07-9190-1bffb7e6db6c-serving-cert\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591919 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr2hc\" (UniqueName: \"kubernetes.io/projected/a54f432f-761c-419d-9c57-654e4f81a28f-kube-api-access-nr2hc\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591951 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-trusted-ca\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.591956 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0edbface-b4a5-4b10-bb00-a7650f2a2b77-auth-proxy-config\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592075 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da210515-1701-4ea1-ab3c-4407b119277e-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vwk2r\" (UID: \"da210515-1701-4ea1-ab3c-4407b119277e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592098 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8pwb\" (UniqueName: \"kubernetes.io/projected/54870e81-ef64-4e07-9190-1bffb7e6db6c-kube-api-access-z8pwb\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592123 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbp48\" (UniqueName: 
\"kubernetes.io/projected/a2e2ff1f-e326-4743-8c32-e69f868ef14c-kube-api-access-cbp48\") pod \"catalog-operator-68c6474976-jj8jj\" (UID: \"a2e2ff1f-e326-4743-8c32-e69f868ef14c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592146 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bks9\" (UniqueName: \"kubernetes.io/projected/966673ca-eeee-4bc2-84c2-805d4f8f9648-kube-api-access-7bks9\") pod \"kube-storage-version-migrator-operator-b67b599dd-t58x9\" (UID: \"966673ca-eeee-4bc2-84c2-805d4f8f9648\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592169 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b6bd3c0-7f03-41d8-bc87-66c374966c21-config\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592193 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592214 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bppw\" (UniqueName: \"kubernetes.io/projected/559c6fab-fdbb-495f-933a-90a3957ec82c-kube-api-access-5bppw\") pod \"olm-operator-6b444d44fb-wmtpt\" (UID: \"559c6fab-fdbb-495f-933a-90a3957ec82c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592250 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/dd4302fa-1a28-4718-b14c-f85e45519916-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-2pb6b\" (UID: \"dd4302fa-1a28-4718-b14c-f85e45519916\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592272 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7m4f\" (UniqueName: \"kubernetes.io/projected/9d4b702c-363c-48e1-aac0-8816682160a6-kube-api-access-t7m4f\") pod \"multus-admission-controller-857f4d67dd-f7vqf\" (UID: \"9d4b702c-363c-48e1-aac0-8816682160a6\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592294 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a54f432f-761c-419d-9c57-654e4f81a28f-stats-auth\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592317 4871 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/0cfdd82f-63f7-4804-af6e-7ec8282bbc92-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rwdp7\" (UID: \"0cfdd82f-63f7-4804-af6e-7ec8282bbc92\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592341 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-encryption-config\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592367 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/140ee3ed-e8e4-42c9-b520-8be36158fd66-trusted-ca\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592389 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4jwv\" (UniqueName: \"kubernetes.io/projected/dff5628c-810a-4f12-a683-341ebc57530a-kube-api-access-k4jwv\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592412 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99e7e324-afae-4256-915b-325038c897e4-config\") pod \"openshift-apiserver-operator-796bbdcf4f-kjfwt\" (UID: \"99e7e324-afae-4256-915b-325038c897e4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592387 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da210515-1701-4ea1-ab3c-4407b119277e-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vwk2r\" (UID: \"da210515-1701-4ea1-ab3c-4407b119277e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592449 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dff5628c-810a-4f12-a683-341ebc57530a-serving-cert\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592489 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ff9xx\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592514 4871 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-bound-sa-token\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592582 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7jgr\" (UniqueName: \"kubernetes.io/projected/5b6bd3c0-7f03-41d8-bc87-66c374966c21-kube-api-access-h7jgr\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592693 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/966673ca-eeee-4bc2-84c2-805d4f8f9648-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-t58x9\" (UID: \"966673ca-eeee-4bc2-84c2-805d4f8f9648\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592748 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-images\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592781 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592804 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb3f5110-df96-4946-b0a5-3439ab4e1724-secret-volume\") pod \"collect-profiles-29402235-s44z5\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592827 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-socket-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592918 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/704ebe80-008e-4369-8003-6d264aa6f6dc-audit-policies\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592957 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"default-certificate\" (UniqueName: \"kubernetes.io/secret/a54f432f-761c-419d-9c57-654e4f81a28f-default-certificate\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592980 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a54f432f-761c-419d-9c57-654e4f81a28f-service-ca-bundle\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.593021 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxtxk\" (UniqueName: \"kubernetes.io/projected/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-kube-api-access-fxtxk\") pod \"marketplace-operator-79b997595-ff9xx\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.592707 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-ca-trust-extracted\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.593256 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/140ee3ed-e8e4-42c9-b520-8be36158fd66-trusted-ca\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:01 crc kubenswrapper[4871]: E1126 05:28:01.593292 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.093279888 +0000 UTC m=+140.276331484 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.593322 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/140ee3ed-e8e4-42c9-b520-8be36158fd66-metrics-tls\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.593373 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-etcd-client\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.593391 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dw6t7\" (UniqueName: \"kubernetes.io/projected/704ebe80-008e-4369-8003-6d264aa6f6dc-kube-api-access-dw6t7\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.593408 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/244d9419-a1ed-45ac-9aca-9291a40ed9b3-metrics-tls\") pod \"dns-default-nntmb\" (UID: \"244d9419-a1ed-45ac-9aca-9291a40ed9b3\") " pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.593425 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5dfs\" (UniqueName: \"kubernetes.io/projected/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-kube-api-access-m5dfs\") pod \"machine-config-server-7n9dd\" (UID: \"9c115df2-f210-4ff8-a6d8-ffe9d04e739c\") " pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.593440 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/dff5628c-810a-4f12-a683-341ebc57530a-etcd-ca\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.593704 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54870e81-ef64-4e07-9190-1bffb7e6db6c-service-ca-bundle\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.593735 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" 
(UniqueName: \"kubernetes.io/secret/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-installation-pull-secrets\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.594430 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba736a92-4399-4f3b-bcc9-fa7a6b30f953-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-h6n6d\" (UID: \"ba736a92-4399-4f3b-bcc9-fa7a6b30f953\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.594604 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-certificates\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.594882 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/54870e81-ef64-4e07-9190-1bffb7e6db6c-serving-cert\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.595840 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-tls\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.596270 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/140ee3ed-e8e4-42c9-b520-8be36158fd66-metrics-tls\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.602766 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.622380 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.642834 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.662886 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.681766 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.694760 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.694959 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/704ebe80-008e-4369-8003-6d264aa6f6dc-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.694990 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/966673ca-eeee-4bc2-84c2-805d4f8f9648-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-t58x9\" (UID: \"966673ca-eeee-4bc2-84c2-805d4f8f9648\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" Nov 26 05:28:01 crc kubenswrapper[4871]: E1126 05:28:01.695032 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.19500146 +0000 UTC m=+140.378053086 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695093 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/dff5628c-810a-4f12-a683-341ebc57530a-etcd-service-ca\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695160 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knzph\" (UniqueName: \"kubernetes.io/projected/99e7e324-afae-4256-915b-325038c897e4-kube-api-access-knzph\") pod \"openshift-apiserver-operator-796bbdcf4f-kjfwt\" (UID: \"99e7e324-afae-4256-915b-325038c897e4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695214 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5b6bd3c0-7f03-41d8-bc87-66c374966c21-auth-proxy-config\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695266 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktn5v\" (UniqueName: \"kubernetes.io/projected/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-kube-api-access-ktn5v\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: 
\"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695311 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/559c6fab-fdbb-495f-933a-90a3957ec82c-srv-cert\") pod \"olm-operator-6b444d44fb-wmtpt\" (UID: \"559c6fab-fdbb-495f-933a-90a3957ec82c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695356 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-tmpfs\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695441 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxw5m\" (UniqueName: \"kubernetes.io/projected/bfda4547-4814-4d32-ba43-b3ffc061bf81-kube-api-access-hxw5m\") pod \"migrator-59844c95c7-b8k6z\" (UID: \"bfda4547-4814-4d32-ba43-b3ffc061bf81\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695488 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-certs\") pod \"machine-config-server-7n9dd\" (UID: \"9c115df2-f210-4ff8-a6d8-ffe9d04e739c\") " pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695569 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-mountpoint-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695617 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-apiservice-cert\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695632 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/dff5628c-810a-4f12-a683-341ebc57530a-etcd-service-ca\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695668 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ddxm\" (UniqueName: \"kubernetes.io/projected/851b2861-b400-41ae-9aae-8e041dc4e85a-kube-api-access-5ddxm\") pod \"ingress-canary-rcft8\" (UID: \"851b2861-b400-41ae-9aae-8e041dc4e85a\") " pod="openshift-ingress-canary/ingress-canary-rcft8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695707 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: 
\"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-mountpoint-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695744 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-registration-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695797 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqqwt\" (UniqueName: \"kubernetes.io/projected/680ec585-d304-48b0-9501-7af7e5bc503b-kube-api-access-cqqwt\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695847 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79sbk\" (UniqueName: \"kubernetes.io/projected/0edbface-b4a5-4b10-bb00-a7650f2a2b77-kube-api-access-79sbk\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695911 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/851b2861-b400-41ae-9aae-8e041dc4e85a-cert\") pod \"ingress-canary-rcft8\" (UID: \"851b2861-b400-41ae-9aae-8e041dc4e85a\") " pod="openshift-ingress-canary/ingress-canary-rcft8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.695957 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-webhook-cert\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696031 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a54f432f-761c-419d-9c57-654e4f81a28f-metrics-certs\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696048 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-registration-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696080 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dff5628c-810a-4f12-a683-341ebc57530a-config\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696153 4871 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-node-bootstrap-token\") pod \"machine-config-server-7n9dd\" (UID: \"9c115df2-f210-4ff8-a6d8-ffe9d04e739c\") " pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696203 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0edbface-b4a5-4b10-bb00-a7650f2a2b77-images\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696255 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp6mq\" (UniqueName: \"kubernetes.io/projected/5245134d-509e-4548-af72-7c1da043b3f4-kube-api-access-gp6mq\") pod \"service-ca-operator-777779d784-sb6f7\" (UID: \"5245134d-509e-4548-af72-7c1da043b3f4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696267 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-tmpfs\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696309 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a2e2ff1f-e326-4743-8c32-e69f868ef14c-srv-cert\") pod \"catalog-operator-68c6474976-jj8jj\" (UID: \"a2e2ff1f-e326-4743-8c32-e69f868ef14c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696368 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb3f5110-df96-4946-b0a5-3439ab4e1724-config-volume\") pod \"collect-profiles-29402235-s44z5\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696417 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/559c6fab-fdbb-495f-933a-90a3957ec82c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-wmtpt\" (UID: \"559c6fab-fdbb-495f-933a-90a3957ec82c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696469 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p887w\" (UniqueName: \"kubernetes.io/projected/66f940f0-55f0-4d12-8376-a997d3c802cd-kube-api-access-p887w\") pod \"dns-operator-744455d44c-9sq6t\" (UID: \"66f940f0-55f0-4d12-8376-a997d3c802cd\") " pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696520 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/704ebe80-008e-4369-8003-6d264aa6f6dc-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: 
\"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696622 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a2e2ff1f-e326-4743-8c32-e69f868ef14c-profile-collector-cert\") pod \"catalog-operator-68c6474976-jj8jj\" (UID: \"a2e2ff1f-e326-4743-8c32-e69f868ef14c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696670 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxqhg\" (UniqueName: \"kubernetes.io/projected/0cfdd82f-63f7-4804-af6e-7ec8282bbc92-kube-api-access-lxqhg\") pod \"package-server-manager-789f6589d5-rwdp7\" (UID: \"0cfdd82f-63f7-4804-af6e-7ec8282bbc92\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696724 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2db4\" (UniqueName: \"kubernetes.io/projected/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-kube-api-access-j2db4\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696780 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr2hc\" (UniqueName: \"kubernetes.io/projected/a54f432f-761c-419d-9c57-654e4f81a28f-kube-api-access-nr2hc\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696849 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0edbface-b4a5-4b10-bb00-a7650f2a2b77-auth-proxy-config\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.696954 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbp48\" (UniqueName: \"kubernetes.io/projected/a2e2ff1f-e326-4743-8c32-e69f868ef14c-kube-api-access-cbp48\") pod \"catalog-operator-68c6474976-jj8jj\" (UID: \"a2e2ff1f-e326-4743-8c32-e69f868ef14c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697006 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bks9\" (UniqueName: \"kubernetes.io/projected/966673ca-eeee-4bc2-84c2-805d4f8f9648-kube-api-access-7bks9\") pod \"kube-storage-version-migrator-operator-b67b599dd-t58x9\" (UID: \"966673ca-eeee-4bc2-84c2-805d4f8f9648\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697056 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b6bd3c0-7f03-41d8-bc87-66c374966c21-config\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697106 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697155 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bppw\" (UniqueName: \"kubernetes.io/projected/559c6fab-fdbb-495f-933a-90a3957ec82c-kube-api-access-5bppw\") pod \"olm-operator-6b444d44fb-wmtpt\" (UID: \"559c6fab-fdbb-495f-933a-90a3957ec82c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697218 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7m4f\" (UniqueName: \"kubernetes.io/projected/9d4b702c-363c-48e1-aac0-8816682160a6-kube-api-access-t7m4f\") pod \"multus-admission-controller-857f4d67dd-f7vqf\" (UID: \"9d4b702c-363c-48e1-aac0-8816682160a6\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697226 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0edbface-b4a5-4b10-bb00-a7650f2a2b77-images\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697258 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/dd4302fa-1a28-4718-b14c-f85e45519916-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-2pb6b\" (UID: \"dd4302fa-1a28-4718-b14c-f85e45519916\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697308 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a54f432f-761c-419d-9c57-654e4f81a28f-stats-auth\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697341 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/0cfdd82f-63f7-4804-af6e-7ec8282bbc92-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rwdp7\" (UID: \"0cfdd82f-63f7-4804-af6e-7ec8282bbc92\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697373 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-encryption-config\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697409 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4jwv\" (UniqueName: \"kubernetes.io/projected/dff5628c-810a-4f12-a683-341ebc57530a-kube-api-access-k4jwv\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697463 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dff5628c-810a-4f12-a683-341ebc57530a-config\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697482 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99e7e324-afae-4256-915b-325038c897e4-config\") pod \"openshift-apiserver-operator-796bbdcf4f-kjfwt\" (UID: \"99e7e324-afae-4256-915b-325038c897e4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697517 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7jgr\" (UniqueName: \"kubernetes.io/projected/5b6bd3c0-7f03-41d8-bc87-66c374966c21-kube-api-access-h7jgr\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697594 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dff5628c-810a-4f12-a683-341ebc57530a-serving-cert\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697644 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ff9xx\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697716 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/966673ca-eeee-4bc2-84c2-805d4f8f9648-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-t58x9\" (UID: \"966673ca-eeee-4bc2-84c2-805d4f8f9648\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697764 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-images\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697773 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0edbface-b4a5-4b10-bb00-a7650f2a2b77-auth-proxy-config\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697833 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697882 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb3f5110-df96-4946-b0a5-3439ab4e1724-secret-volume\") pod \"collect-profiles-29402235-s44z5\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697930 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a54f432f-761c-419d-9c57-654e4f81a28f-service-ca-bundle\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.697975 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-socket-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698020 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/704ebe80-008e-4369-8003-6d264aa6f6dc-audit-policies\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698097 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/a54f432f-761c-419d-9c57-654e4f81a28f-default-certificate\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698148 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxtxk\" (UniqueName: \"kubernetes.io/projected/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-kube-api-access-fxtxk\") pod \"marketplace-operator-79b997595-ff9xx\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:28:01 crc kubenswrapper[4871]: E1126 05:28:01.698177 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-26 05:28:02.198157048 +0000 UTC m=+140.381208744 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698228 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-etcd-client\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698284 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/dff5628c-810a-4f12-a683-341ebc57530a-etcd-ca\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698320 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dw6t7\" (UniqueName: \"kubernetes.io/projected/704ebe80-008e-4369-8003-6d264aa6f6dc-kube-api-access-dw6t7\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698352 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/244d9419-a1ed-45ac-9aca-9291a40ed9b3-metrics-tls\") pod \"dns-default-nntmb\" (UID: \"244d9419-a1ed-45ac-9aca-9291a40ed9b3\") " pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698387 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5dfs\" (UniqueName: \"kubernetes.io/projected/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-kube-api-access-m5dfs\") pod \"machine-config-server-7n9dd\" (UID: \"9c115df2-f210-4ff8-a6d8-ffe9d04e739c\") " pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698144 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-socket-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698423 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-key\") pod \"service-ca-9c57cc56f-cbq2p\" (UID: \"ff2fc558-90c0-467d-b6c0-b395c9b26998\") " pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698461 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-cabundle\") pod \"service-ca-9c57cc56f-cbq2p\" (UID: \"ff2fc558-90c0-467d-b6c0-b395c9b26998\") " pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698502 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ff9xx\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698562 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-config\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698640 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99e7e324-afae-4256-915b-325038c897e4-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-kjfwt\" (UID: \"99e7e324-afae-4256-915b-325038c897e4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698677 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/5b6bd3c0-7f03-41d8-bc87-66c374966c21-machine-approver-tls\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698708 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dff5628c-810a-4f12-a683-341ebc57530a-etcd-client\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698743 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wfvk\" (UniqueName: \"kubernetes.io/projected/244d9419-a1ed-45ac-9aca-9291a40ed9b3-kube-api-access-5wfvk\") pod \"dns-default-nntmb\" (UID: \"244d9419-a1ed-45ac-9aca-9291a40ed9b3\") " pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698790 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5245134d-509e-4548-af72-7c1da043b3f4-serving-cert\") pod \"service-ca-operator-777779d784-sb6f7\" (UID: \"5245134d-509e-4548-af72-7c1da043b3f4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698821 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5245134d-509e-4548-af72-7c1da043b3f4-config\") pod \"service-ca-operator-777779d784-sb6f7\" (UID: \"5245134d-509e-4548-af72-7c1da043b3f4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" Nov 26 
05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.698855 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-serving-cert\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.699144 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a54f432f-761c-419d-9c57-654e4f81a28f-service-ca-bundle\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.699199 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/dff5628c-810a-4f12-a683-341ebc57530a-etcd-ca\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.699759 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtbjd\" (UniqueName: \"kubernetes.io/projected/dd4302fa-1a28-4718-b14c-f85e45519916-kube-api-access-qtbjd\") pod \"control-plane-machine-set-operator-78cbb6b69f-2pb6b\" (UID: \"dd4302fa-1a28-4718-b14c-f85e45519916\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.699817 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwn4k\" (UniqueName: \"kubernetes.io/projected/cb3f5110-df96-4946-b0a5-3439ab4e1724-kube-api-access-kwn4k\") pod \"collect-profiles-29402235-s44z5\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.699864 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/704ebe80-008e-4369-8003-6d264aa6f6dc-audit-dir\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.699913 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/66f940f0-55f0-4d12-8376-a997d3c802cd-metrics-tls\") pod \"dns-operator-744455d44c-9sq6t\" (UID: \"66f940f0-55f0-4d12-8376-a997d3c802cd\") " pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.699944 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-csi-data-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.699993 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9swz\" (UniqueName: \"kubernetes.io/projected/ff2fc558-90c0-467d-b6c0-b395c9b26998-kube-api-access-d9swz\") pod \"service-ca-9c57cc56f-cbq2p\" (UID: 
\"ff2fc558-90c0-467d-b6c0-b395c9b26998\") " pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.700029 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/244d9419-a1ed-45ac-9aca-9291a40ed9b3-config-volume\") pod \"dns-default-nntmb\" (UID: \"244d9419-a1ed-45ac-9aca-9291a40ed9b3\") " pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.700095 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-plugins-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.700109 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-csi-data-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.700127 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0edbface-b4a5-4b10-bb00-a7650f2a2b77-proxy-tls\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.700162 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4b702c-363c-48e1-aac0-8816682160a6-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-f7vqf\" (UID: \"9d4b702c-363c-48e1-aac0-8816682160a6\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.700200 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/680ec585-d304-48b0-9501-7af7e5bc503b-plugins-dir\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.700031 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/704ebe80-008e-4369-8003-6d264aa6f6dc-audit-dir\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.702690 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.703275 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb3f5110-df96-4946-b0a5-3439ab4e1724-secret-volume\") pod \"collect-profiles-29402235-s44z5\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.703287 4871 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dff5628c-810a-4f12-a683-341ebc57530a-etcd-client\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.703480 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a54f432f-761c-419d-9c57-654e4f81a28f-metrics-certs\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.704577 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/a54f432f-761c-419d-9c57-654e4f81a28f-stats-auth\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.704640 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/559c6fab-fdbb-495f-933a-90a3957ec82c-profile-collector-cert\") pod \"olm-operator-6b444d44fb-wmtpt\" (UID: \"559c6fab-fdbb-495f-933a-90a3957ec82c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.706424 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a2e2ff1f-e326-4743-8c32-e69f868ef14c-profile-collector-cert\") pod \"catalog-operator-68c6474976-jj8jj\" (UID: \"a2e2ff1f-e326-4743-8c32-e69f868ef14c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.706781 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dff5628c-810a-4f12-a683-341ebc57530a-serving-cert\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.706913 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0edbface-b4a5-4b10-bb00-a7650f2a2b77-proxy-tls\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.710596 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/559c6fab-fdbb-495f-933a-90a3957ec82c-srv-cert\") pod \"olm-operator-6b444d44fb-wmtpt\" (UID: \"559c6fab-fdbb-495f-933a-90a3957ec82c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.723187 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.743163 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.753072 4871 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/a54f432f-761c-419d-9c57-654e4f81a28f-default-certificate\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.762595 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.768348 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/704ebe80-008e-4369-8003-6d264aa6f6dc-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.782419 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.801620 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:01 crc kubenswrapper[4871]: E1126 05:28:01.801884 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.301848388 +0000 UTC m=+140.484900014 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.803090 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.803234 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 26 05:28:01 crc kubenswrapper[4871]: E1126 05:28:01.803569 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.303508949 +0000 UTC m=+140.486560575 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.822255 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.833970 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-etcd-client\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.843487 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.854320 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-serving-cert\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.863173 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.882550 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.897010 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/66f940f0-55f0-4d12-8376-a997d3c802cd-metrics-tls\") pod \"dns-operator-744455d44c-9sq6t\" (UID: \"66f940f0-55f0-4d12-8376-a997d3c802cd\") " pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t" Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.903599 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.903648 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 05:28:01 crc kubenswrapper[4871]: E1126 05:28:01.904029 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.404000781 +0000 UTC m=+140.587052407 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.905440 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:01 crc kubenswrapper[4871]: E1126 05:28:01.906064 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.406046721 +0000 UTC m=+140.589098347 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.922928 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.942560 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.961958 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.967730 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/704ebe80-008e-4369-8003-6d264aa6f6dc-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb"
Nov 26 05:28:01 crc kubenswrapper[4871]: I1126 05:28:01.982305 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.003331 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.006635 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.006868 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.50683141 +0000 UTC m=+140.689883046 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.007240 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.007599 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.507584129 +0000 UTC m=+140.690635725 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.023499 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.030383 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/704ebe80-008e-4369-8003-6d264aa6f6dc-audit-policies\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.042231 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.046125 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5b6bd3c0-7f03-41d8-bc87-66c374966c21-auth-proxy-config\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.063619 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.074451 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/5b6bd3c0-7f03-41d8-bc87-66c374966c21-machine-approver-tls\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.082863 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.089124 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b6bd3c0-7f03-41d8-bc87-66c374966c21-config\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.102804 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.119819 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.120336 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.620290502 +0000 UTC m=+140.803342128 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.120633 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.121319 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.621284376 +0000 UTC m=+140.804336002 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.124475 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.142670 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.162364 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.174678 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.182699 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.190187 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-config\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.202077 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.209798 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-images\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.222964 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.223388 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.223655 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.723624004 +0000 UTC m=+140.906675620 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.224373 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.224917 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.724893595 +0000 UTC m=+140.907945221 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.242471 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.253516 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ff9xx\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.263323 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.282830 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.313613 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.320678 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ff9xx\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.322594 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.325562 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.325872 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.825841908 +0000 UTC m=+141.008893534 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.326349 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.327334 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.827313364 +0000 UTC m=+141.010364990 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.342161 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.353374 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/dd4302fa-1a28-4718-b14c-f85e45519916-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-2pb6b\" (UID: \"dd4302fa-1a28-4718-b14c-f85e45519916\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.363778 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.380553 4871 request.go:700] Waited for 1.001310777s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver-operator/configmaps?fieldSelector=metadata.name%3Dopenshift-apiserver-operator-config&limit=500&resourceVersion=0
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.381781 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.389192 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99e7e324-afae-4256-915b-325038c897e4-config\") pod \"openshift-apiserver-operator-796bbdcf4f-kjfwt\" (UID: \"99e7e324-afae-4256-915b-325038c897e4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.403016 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.422055 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.427650 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.427926 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.927903528 +0000 UTC m=+141.110955134 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.428869 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.429340 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:02.929326493 +0000 UTC m=+141.112378089 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.438513 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99e7e324-afae-4256-915b-325038c897e4-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-kjfwt\" (UID: \"99e7e324-afae-4256-915b-325038c897e4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.443006 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.463246 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.482587 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.503048 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.522881 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.530736 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.530921 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.030900232 +0000 UTC m=+141.213951828 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.532021 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.532431 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.032414539 +0000 UTC m=+141.215466165 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.543623 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.563391 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.584687 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.590925 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/966673ca-eeee-4bc2-84c2-805d4f8f9648-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-t58x9\" (UID: \"966673ca-eeee-4bc2-84c2-805d4f8f9648\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.602504 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.609032 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/966673ca-eeee-4bc2-84c2-805d4f8f9648-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-t58x9\" (UID: \"966673ca-eeee-4bc2-84c2-805d4f8f9648\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.623503 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.632863 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.633418 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.133361702 +0000 UTC m=+141.316413298 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.633675 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.634236 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.134224083 +0000 UTC m=+141.317275679 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.660649 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsls8\" (UniqueName: \"kubernetes.io/projected/d2225428-c79d-4406-9238-432797b4fa99-kube-api-access-tsls8\") pod \"controller-manager-879f6c89f-24p5x\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.679594 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rc29k\" (UniqueName: \"kubernetes.io/projected/ba518c6b-5e43-4592-b7ca-e3cfe9ca6681-kube-api-access-rc29k\") pod \"console-operator-58897d9998-ml4vf\" (UID: \"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681\") " pod="openshift-console-operator/console-operator-58897d9998-ml4vf"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.696437 4871 secret.go:188] Couldn't get secret openshift-machine-config-operator/machine-config-server-tls: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.696650 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-certs podName:9c115df2-f210-4ff8-a6d8-ffe9d04e739c nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.196633484 +0000 UTC m=+141.379685070 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "certs" (UniqueName: "kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-certs") pod "machine-config-server-7n9dd" (UID: "9c115df2-f210-4ff8-a6d8-ffe9d04e739c") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.696993 4871 configmap.go:193] Couldn't get configMap openshift-operator-lifecycle-manager/collect-profiles-config: failed to sync configmap cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.697074 4871 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.697132 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-webhook-cert podName:df51ab2b-98e7-4c22-83ba-2bc1f70eaa07 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.197117836 +0000 UTC m=+141.380169422 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-webhook-cert") pod "packageserver-d55dfcdfc-k95qw" (UID: "df51ab2b-98e7-4c22-83ba-2bc1f70eaa07") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.697027 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vm26j\" (UniqueName: \"kubernetes.io/projected/ee091557-58e0-45ce-bf00-f7f2a1b2ebf4-kube-api-access-vm26j\") pod \"apiserver-76f77b778f-ql4w4\" (UID: \"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4\") " pod="openshift-apiserver/apiserver-76f77b778f-ql4w4"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.697058 4871 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.697182 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/851b2861-b400-41ae-9aae-8e041dc4e85a-cert podName:851b2861-b400-41ae-9aae-8e041dc4e85a nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.197174787 +0000 UTC m=+141.380226373 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/851b2861-b400-41ae-9aae-8e041dc4e85a-cert") pod "ingress-canary-rcft8" (UID: "851b2861-b400-41ae-9aae-8e041dc4e85a") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.697290 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/cb3f5110-df96-4946-b0a5-3439ab4e1724-config-volume podName:cb3f5110-df96-4946-b0a5-3439ab4e1724 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.19727927 +0000 UTC m=+141.380330856 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/cb3f5110-df96-4946-b0a5-3439ab4e1724-config-volume") pod "collect-profiles-29402235-s44z5" (UID: "cb3f5110-df96-4946-b0a5-3439ab4e1724") : failed to sync configmap cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.698462 4871 secret.go:188] Couldn't get secret openshift-oauth-apiserver/encryption-config-1: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.698603 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-encryption-config podName:704ebe80-008e-4369-8003-6d264aa6f6dc nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.198590993 +0000 UTC m=+141.381642579 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "encryption-config" (UniqueName: "kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-encryption-config") pod "apiserver-7bbb656c7d-2bzqb" (UID: "704ebe80-008e-4369-8003-6d264aa6f6dc") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.698704 4871 secret.go:188] Couldn't get secret openshift-machine-config-operator/node-bootstrapper-token: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.698821 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-node-bootstrap-token podName:9c115df2-f210-4ff8-a6d8-ffe9d04e739c nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.198808158 +0000 UTC m=+141.381859794 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "node-bootstrap-token" (UniqueName: "kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-node-bootstrap-token") pod "machine-config-server-7n9dd" (UID: "9c115df2-f210-4ff8-a6d8-ffe9d04e739c") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.700464 4871 configmap.go:193] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: failed to sync configmap cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.700560 4871 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.700604 4871 secret.go:188] Couldn't get secret openshift-multus/multus-admission-controller-secret: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.700662 4871 secret.go:188] Couldn't get secret openshift-service-ca/signing-key: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.700744 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5245134d-509e-4548-af72-7c1da043b3f4-config podName:5245134d-509e-4548-af72-7c1da043b3f4 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.200585302 +0000 UTC m=+141.383636888 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/5245134d-509e-4548-af72-7c1da043b3f4-config") pod "service-ca-operator-777779d784-sb6f7" (UID: "5245134d-509e-4548-af72-7c1da043b3f4") : failed to sync configmap cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.701226 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2e2ff1f-e326-4743-8c32-e69f868ef14c-srv-cert podName:a2e2ff1f-e326-4743-8c32-e69f868ef14c nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.201214097 +0000 UTC m=+141.384265673 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/a2e2ff1f-e326-4743-8c32-e69f868ef14c-srv-cert") pod "catalog-operator-68c6474976-jj8jj" (UID: "a2e2ff1f-e326-4743-8c32-e69f868ef14c") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.701323 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4b702c-363c-48e1-aac0-8816682160a6-webhook-certs podName:9d4b702c-363c-48e1-aac0-8816682160a6 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.20131519 +0000 UTC m=+141.384366776 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9d4b702c-363c-48e1-aac0-8816682160a6-webhook-certs") pod "multus-admission-controller-857f4d67dd-f7vqf" (UID: "9d4b702c-363c-48e1-aac0-8816682160a6") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.701408 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-key podName:ff2fc558-90c0-467d-b6c0-b395c9b26998 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.201401992 +0000 UTC m=+141.384453578 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-key") pod "service-ca-9c57cc56f-cbq2p" (UID: "ff2fc558-90c0-467d-b6c0-b395c9b26998") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.700797 4871 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.702097 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-apiservice-cert podName:df51ab2b-98e7-4c22-83ba-2bc1f70eaa07 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.201985406 +0000 UTC m=+141.385036992 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-apiservice-cert") pod "packageserver-d55dfcdfc-k95qw" (UID: "df51ab2b-98e7-4c22-83ba-2bc1f70eaa07") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.700843 4871 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.702342 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/244d9419-a1ed-45ac-9aca-9291a40ed9b3-config-volume podName:244d9419-a1ed-45ac-9aca-9291a40ed9b3 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.202333055 +0000 UTC m=+141.385384641 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/244d9419-a1ed-45ac-9aca-9291a40ed9b3-config-volume") pod "dns-default-nntmb" (UID: "244d9419-a1ed-45ac-9aca-9291a40ed9b3") : failed to sync configmap cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.700868 4871 secret.go:188] Couldn't get secret openshift-service-ca-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.702549 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5245134d-509e-4548-af72-7c1da043b3f4-serving-cert podName:5245134d-509e-4548-af72-7c1da043b3f4 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.202519209 +0000 UTC m=+141.385570795 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/5245134d-509e-4548-af72-7c1da043b3f4-serving-cert") pod "service-ca-operator-777779d784-sb6f7" (UID: "5245134d-509e-4548-af72-7c1da043b3f4") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.700894 4871 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.703118 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cfdd82f-63f7-4804-af6e-7ec8282bbc92-package-server-manager-serving-cert podName:0cfdd82f-63f7-4804-af6e-7ec8282bbc92 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.202727635 +0000 UTC m=+141.385779221 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/0cfdd82f-63f7-4804-af6e-7ec8282bbc92-package-server-manager-serving-cert") pod "package-server-manager-789f6589d5-rwdp7" (UID: "0cfdd82f-63f7-4804-af6e-7ec8282bbc92") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.701074 4871 secret.go:188] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.701105 4871 configmap.go:193] Couldn't get configMap openshift-service-ca/signing-cabundle: failed to sync configmap cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.703472 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/244d9419-a1ed-45ac-9aca-9291a40ed9b3-metrics-tls podName:244d9419-a1ed-45ac-9aca-9291a40ed9b3 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.203329909 +0000 UTC m=+141.386381565 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/244d9419-a1ed-45ac-9aca-9291a40ed9b3-metrics-tls") pod "dns-default-nntmb" (UID: "244d9419-a1ed-45ac-9aca-9291a40ed9b3") : failed to sync secret cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.703620 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-cabundle podName:ff2fc558-90c0-467d-b6c0-b395c9b26998 nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.203609416 +0000 UTC m=+141.386661042 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-cabundle") pod "service-ca-9c57cc56f-cbq2p" (UID: "ff2fc558-90c0-467d-b6c0-b395c9b26998") : failed to sync configmap cache: timed out waiting for the condition
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.705240 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.722067 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rq7fb\" (UniqueName: \"kubernetes.io/projected/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-kube-api-access-rq7fb\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.722646 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.725269 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.735217 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.735341 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.23532493 +0000 UTC m=+141.418376516 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.735669 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.735953 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.235943065 +0000 UTC m=+141.418994651 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.759701 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-ml4vf"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.760648 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jhl8\" (UniqueName: \"kubernetes.io/projected/6bece945-b45b-4d5d-aa90-23400b5267d3-kube-api-access-8jhl8\") pod \"openshift-config-operator-7777fb866f-6zcbt\" (UID: \"6bece945-b45b-4d5d-aa90-23400b5267d3\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.776576 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ed9c6e5d-b580-43f0-8741-1082cb1b9caa-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-kb6mv\" (UID: \"ed9c6e5d-b580-43f0-8741-1082cb1b9caa\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.780393 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.802168 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8v9k\" (UniqueName: \"kubernetes.io/projected/5b7f9021-d531-4dda-89c5-57eff16b24ec-kube-api-access-s8v9k\") pod \"cluster-samples-operator-665b6dd947-zb8wp\" (UID: \"5b7f9021-d531-4dda-89c5-57eff16b24ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.821806 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmq95\" (UniqueName: \"kubernetes.io/projected/65ed678d-1457-46e2-a59d-1b05e7bbee8c-kube-api-access-dmq95\") pod \"console-f9d7485db-h5qx5\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " pod="openshift-console/console-f9d7485db-h5qx5"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.822880 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.837152 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.837679 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.337652686 +0000 UTC m=+141.520704272 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.837809 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.838331 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.338312103 +0000 UTC m=+141.521363699 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.843806 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.862743 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.882805 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.902388 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.915833 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24p5x"]
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.922376 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 26 05:28:02 crc kubenswrapper[4871]: W1126 05:28:02.928088 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd2225428_c79d_4406_9238_432797b4fa99.slice/crio-3466e5288e80802867c08c8efcbcd3ab215d8fc4bed70d78f4226339d06fccc8 WatchSource:0}: Error finding container 3466e5288e80802867c08c8efcbcd3ab215d8fc4bed70d78f4226339d06fccc8: Status 404 returned error can't find the container with id 3466e5288e80802867c08c8efcbcd3ab215d8fc4bed70d78f4226339d06fccc8
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.935801 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ql4w4"]
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.938675 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.938838 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.438813135 +0000 UTC m=+141.621864711 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.938874 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:02 crc kubenswrapper[4871]: E1126 05:28:02.939266 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.439259026 +0000 UTC m=+141.622310612 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.942175 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 26 05:28:02 crc kubenswrapper[4871]: W1126 05:28:02.948802 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podee091557_58e0_45ce_bf00_f7f2a1b2ebf4.slice/crio-6a1a500a6054178a7e21f2ade42214b4079b40ca9cd07bd4f6491998d278dbb5 WatchSource:0}: Error finding container 6a1a500a6054178a7e21f2ade42214b4079b40ca9cd07bd4f6491998d278dbb5: Status 404 returned error can't find the container with id 6a1a500a6054178a7e21f2ade42214b4079b40ca9cd07bd4f6491998d278dbb5
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.960769 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-ml4vf"]
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.962052 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 26 05:28:02 crc kubenswrapper[4871]: I1126 05:28:02.983421 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 26 05:28:02 crc kubenswrapper[4871]: W1126 05:28:02.986206 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba518c6b_5e43_4592_b7ca_e3cfe9ca6681.slice/crio-fccdd6181d32ddc95a42572d33fa2ef92b7e2dca1daa030595a4ed12bb6b53cd WatchSource:0}: Error finding container fccdd6181d32ddc95a42572d33fa2ef92b7e2dca1daa030595a4ed12bb6b53cd: Status 404 returned error can't find the container with id fccdd6181d32ddc95a42572d33fa2ef92b7e2dca1daa030595a4ed12bb6b53cd
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:02.999794 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt"]
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.005475 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 26 05:28:03 crc kubenswrapper[4871]: W1126 05:28:03.013282 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6bece945_b45b_4d5d_aa90_23400b5267d3.slice/crio-d7d474df9d8074086c4381a63b674da7bf99d6184676a76ae1bddb127bb8a162 WatchSource:0}: Error finding container d7d474df9d8074086c4381a63b674da7bf99d6184676a76ae1bddb127bb8a162: Status 404 returned error can't find the container with id d7d474df9d8074086c4381a63b674da7bf99d6184676a76ae1bddb127bb8a162
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.014398 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.022440 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.038323 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.040126 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.040266 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.540247769 +0000 UTC m=+141.723299355 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.040658 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.040884 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.540876415 +0000 UTC m=+141.723928001 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.042810 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.052500 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-h5qx5"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.062512 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.082475 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.122713 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.141151 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.141849 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.641834418 +0000 UTC m=+141.824886004 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.142305 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.163081 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.182128 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.201860 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.214419 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp"]
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.222172 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.235351 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv"]
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.242304 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243147 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume
\"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-node-bootstrap-token\") pod \"machine-config-server-7n9dd\" (UID: \"9c115df2-f210-4ff8-a6d8-ffe9d04e739c\") " pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243192 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a2e2ff1f-e326-4743-8c32-e69f868ef14c-srv-cert\") pod \"catalog-operator-68c6474976-jj8jj\" (UID: \"a2e2ff1f-e326-4743-8c32-e69f868ef14c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243216 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb3f5110-df96-4946-b0a5-3439ab4e1724-config-volume\") pod \"collect-profiles-29402235-s44z5\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243317 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/0cfdd82f-63f7-4804-af6e-7ec8282bbc92-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rwdp7\" (UID: \"0cfdd82f-63f7-4804-af6e-7ec8282bbc92\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243341 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-encryption-config\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243378 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243418 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/244d9419-a1ed-45ac-9aca-9291a40ed9b3-metrics-tls\") pod \"dns-default-nntmb\" (UID: \"244d9419-a1ed-45ac-9aca-9291a40ed9b3\") " pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243441 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-key\") pod \"service-ca-9c57cc56f-cbq2p\" (UID: \"ff2fc558-90c0-467d-b6c0-b395c9b26998\") " pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243457 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-cabundle\") pod \"service-ca-9c57cc56f-cbq2p\" (UID: \"ff2fc558-90c0-467d-b6c0-b395c9b26998\") " 
pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243503 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5245134d-509e-4548-af72-7c1da043b3f4-serving-cert\") pod \"service-ca-operator-777779d784-sb6f7\" (UID: \"5245134d-509e-4548-af72-7c1da043b3f4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243519 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5245134d-509e-4548-af72-7c1da043b3f4-config\") pod \"service-ca-operator-777779d784-sb6f7\" (UID: \"5245134d-509e-4548-af72-7c1da043b3f4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243585 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/244d9419-a1ed-45ac-9aca-9291a40ed9b3-config-volume\") pod \"dns-default-nntmb\" (UID: \"244d9419-a1ed-45ac-9aca-9291a40ed9b3\") " pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243607 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4b702c-363c-48e1-aac0-8816682160a6-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-f7vqf\" (UID: \"9d4b702c-363c-48e1-aac0-8816682160a6\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243654 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-certs\") pod \"machine-config-server-7n9dd\" (UID: \"9c115df2-f210-4ff8-a6d8-ffe9d04e739c\") " pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.243673 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-apiservice-cert\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.244629 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-webhook-cert\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.244669 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/851b2861-b400-41ae-9aae-8e041dc4e85a-cert\") pod \"ingress-canary-rcft8\" (UID: \"851b2861-b400-41ae-9aae-8e041dc4e85a\") " pod="openshift-ingress-canary/ingress-canary-rcft8" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.244748 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/244d9419-a1ed-45ac-9aca-9291a40ed9b3-config-volume\") pod \"dns-default-nntmb\" (UID: 
\"244d9419-a1ed-45ac-9aca-9291a40ed9b3\") " pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.244817 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.744801521 +0000 UTC m=+141.927853097 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.245254 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5245134d-509e-4548-af72-7c1da043b3f4-config\") pod \"service-ca-operator-777779d784-sb6f7\" (UID: \"5245134d-509e-4548-af72-7c1da043b3f4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.246385 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-cabundle\") pod \"service-ca-9c57cc56f-cbq2p\" (UID: \"ff2fc558-90c0-467d-b6c0-b395c9b26998\") " pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.247416 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb3f5110-df96-4946-b0a5-3439ab4e1724-config-volume\") pod \"collect-profiles-29402235-s44z5\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.248838 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/704ebe80-008e-4369-8003-6d264aa6f6dc-encryption-config\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.249192 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5245134d-509e-4548-af72-7c1da043b3f4-serving-cert\") pod \"service-ca-operator-777779d784-sb6f7\" (UID: \"5245134d-509e-4548-af72-7c1da043b3f4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.249287 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/0cfdd82f-63f7-4804-af6e-7ec8282bbc92-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rwdp7\" (UID: \"0cfdd82f-63f7-4804-af6e-7ec8282bbc92\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.249450 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/244d9419-a1ed-45ac-9aca-9291a40ed9b3-metrics-tls\") pod \"dns-default-nntmb\" (UID: \"244d9419-a1ed-45ac-9aca-9291a40ed9b3\") " pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.249487 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/851b2861-b400-41ae-9aae-8e041dc4e85a-cert\") pod \"ingress-canary-rcft8\" (UID: \"851b2861-b400-41ae-9aae-8e041dc4e85a\") " pod="openshift-ingress-canary/ingress-canary-rcft8" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.249896 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ff2fc558-90c0-467d-b6c0-b395c9b26998-signing-key\") pod \"service-ca-9c57cc56f-cbq2p\" (UID: \"ff2fc558-90c0-467d-b6c0-b395c9b26998\") " pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.249967 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a2e2ff1f-e326-4743-8c32-e69f868ef14c-srv-cert\") pod \"catalog-operator-68c6474976-jj8jj\" (UID: \"a2e2ff1f-e326-4743-8c32-e69f868ef14c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.250027 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-apiservice-cert\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.251178 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4b702c-363c-48e1-aac0-8816682160a6-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-f7vqf\" (UID: \"9d4b702c-363c-48e1-aac0-8816682160a6\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.252670 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-webhook-cert\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.258606 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-h5qx5"] Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.264823 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.282756 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.302062 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.323222 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.343240 4871 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.346788 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.347343 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.847319572 +0000 UTC m=+142.030371188 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.347574 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.348012 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.847997109 +0000 UTC m=+142.031048765 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.363467 4871 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.381180 4871 request.go:700] Waited for 1.894744629s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/serviceaccounts/route-controller-manager-sa/token Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.398117 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbtkx\" (UniqueName: \"kubernetes.io/projected/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-kube-api-access-hbtkx\") pod \"route-controller-manager-6576b87f9c-68sd6\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.401962 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.427102 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9e37bbf9-7c3e-431d-a8af-dd7ca13730e5-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zhpbn\" (UID: \"9e37bbf9-7c3e-431d-a8af-dd7ca13730e5\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.437201 4871 generic.go:334] "Generic (PLEG): container finished" podID="6bece945-b45b-4d5d-aa90-23400b5267d3" containerID="3eb0524dedc4006845c922a408c0f4802b66694e6f06017437e724a8575075d8" exitCode=0 Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.437251 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt" event={"ID":"6bece945-b45b-4d5d-aa90-23400b5267d3","Type":"ContainerDied","Data":"3eb0524dedc4006845c922a408c0f4802b66694e6f06017437e724a8575075d8"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.437312 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt" event={"ID":"6bece945-b45b-4d5d-aa90-23400b5267d3","Type":"ContainerStarted","Data":"d7d474df9d8074086c4381a63b674da7bf99d6184676a76ae1bddb127bb8a162"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.438719 4871 generic.go:334] "Generic (PLEG): container finished" podID="ee091557-58e0-45ce-bf00-f7f2a1b2ebf4" containerID="b0ad7695b7b479b06050a08e6f4d35a3e1320bd8659712cc3ca0c76e2b4f8f86" exitCode=0 Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.438763 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" 
event={"ID":"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4","Type":"ContainerDied","Data":"b0ad7695b7b479b06050a08e6f4d35a3e1320bd8659712cc3ca0c76e2b4f8f86"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.438778 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" event={"ID":"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4","Type":"ContainerStarted","Data":"6a1a500a6054178a7e21f2ade42214b4079b40ca9cd07bd4f6491998d278dbb5"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.439987 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-669w4\" (UniqueName: \"kubernetes.io/projected/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-kube-api-access-669w4\") pod \"oauth-openshift-558db77b4-pz8qb\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.442821 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.444368 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv" event={"ID":"ed9c6e5d-b580-43f0-8741-1082cb1b9caa","Type":"ContainerStarted","Data":"403cf2a351a6723be4c6530031c64adc99e3e0fdfe205898317891259faf3120"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.444408 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv" event={"ID":"ed9c6e5d-b580-43f0-8741-1082cb1b9caa","Type":"ContainerStarted","Data":"ac31e1125a6baf23ab088748d90b693b6121138d34879cfbd16699fc0aa82848"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.448793 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.450267 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:03.950246614 +0000 UTC m=+142.133298200 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.450438 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp" event={"ID":"5b7f9021-d531-4dda-89c5-57eff16b24ec","Type":"ContainerStarted","Data":"acdf74ef859ed0a7f8c2c06b238573efd48febda8205cd9f83e7f90ee76cb9bf"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.450862 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-node-bootstrap-token\") pod \"machine-config-server-7n9dd\" (UID: \"9c115df2-f210-4ff8-a6d8-ffe9d04e739c\") " pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.453196 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" event={"ID":"d2225428-c79d-4406-9238-432797b4fa99","Type":"ContainerStarted","Data":"ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.453231 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" event={"ID":"d2225428-c79d-4406-9238-432797b4fa99","Type":"ContainerStarted","Data":"3466e5288e80802867c08c8efcbcd3ab215d8fc4bed70d78f4226339d06fccc8"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.454083 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.456023 4871 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-24p5x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.456071 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" podUID="d2225428-c79d-4406-9238-432797b4fa99" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.456215 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h5qx5" event={"ID":"65ed678d-1457-46e2-a59d-1b05e7bbee8c","Type":"ContainerStarted","Data":"f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.456244 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h5qx5" event={"ID":"65ed678d-1457-46e2-a59d-1b05e7bbee8c","Type":"ContainerStarted","Data":"476b0f3d190fb318427a04881c97d5d3021290588e6ea5689a0b7ce02e426b29"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.458200 4871 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-ml4vf" event={"ID":"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681","Type":"ContainerStarted","Data":"fab97d42619ae5a1592a738eea638e0a4e0092a49f657205682972f302fe653c"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.458234 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-ml4vf" event={"ID":"ba518c6b-5e43-4592-b7ca-e3cfe9ca6681","Type":"ContainerStarted","Data":"fccdd6181d32ddc95a42572d33fa2ef92b7e2dca1daa030595a4ed12bb6b53cd"} Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.458803 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-ml4vf" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.462797 4871 patch_prober.go:28] interesting pod/console-operator-58897d9998-ml4vf container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/readyz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.462886 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-ml4vf" podUID="ba518c6b-5e43-4592-b7ca-e3cfe9ca6681" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/readyz\": dial tcp 10.217.0.20:8443: connect: connection refused" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.466038 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.482232 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.485797 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.496483 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-certs\") pod \"machine-config-server-7n9dd\" (UID: \"9c115df2-f210-4ff8-a6d8-ffe9d04e739c\") " pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.554447 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.559356 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:04.059339347 +0000 UTC m=+142.242391043 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.569861 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fdrl\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-kube-api-access-9fdrl\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.580044 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/140ee3ed-e8e4-42c9-b520-8be36158fd66-bound-sa-token\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.582150 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwk27\" (UniqueName: \"kubernetes.io/projected/7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42-kube-api-access-dwk27\") pod \"machine-config-controller-84d6567774-qx4hc\" (UID: \"7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.589254 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8hzs\" (UniqueName: \"kubernetes.io/projected/317fac77-edf3-46a5-9635-1dd8bb83fea6-kube-api-access-z8hzs\") pod \"downloads-7954f5f757-rdvkb\" (UID: \"317fac77-edf3-46a5-9635-1dd8bb83fea6\") " pod="openshift-console/downloads-7954f5f757-rdvkb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.607147 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ba736a92-4399-4f3b-bcc9-fa7a6b30f953-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-h6n6d\" (UID: \"ba736a92-4399-4f3b-bcc9-fa7a6b30f953\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.616343 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6l24c\" (UniqueName: \"kubernetes.io/projected/140ee3ed-e8e4-42c9-b520-8be36158fd66-kube-api-access-6l24c\") pod \"ingress-operator-5b745b69d9-pkt8f\" (UID: \"140ee3ed-e8e4-42c9-b520-8be36158fd66\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.640898 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njmnj\" (UniqueName: \"kubernetes.io/projected/eb62a12d-ae78-45e2-b32d-126d3643260d-kube-api-access-njmnj\") pod \"openshift-controller-manager-operator-756b6f6bc6-n27d9\" (UID: \"eb62a12d-ae78-45e2-b32d-126d3643260d\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.646560 4871 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6"] Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.657223 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.658141 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:04.158124796 +0000 UTC m=+142.341176382 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.658164 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da210515-1701-4ea1-ab3c-4407b119277e-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-vwk2r\" (UID: \"da210515-1701-4ea1-ab3c-4407b119277e\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" Nov 26 05:28:03 crc kubenswrapper[4871]: W1126 05:28:03.675220 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10c1cc5f_f24d_434a_a9dd_0e0d8d22c153.slice/crio-f5b62bfabe3d1b438ef66fb79aa859b37e7683e3450be6f7c2f76cd69d086bcb WatchSource:0}: Error finding container f5b62bfabe3d1b438ef66fb79aa859b37e7683e3450be6f7c2f76cd69d086bcb: Status 404 returned error can't find the container with id f5b62bfabe3d1b438ef66fb79aa859b37e7683e3450be6f7c2f76cd69d086bcb Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.682790 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8pwb\" (UniqueName: \"kubernetes.io/projected/54870e81-ef64-4e07-9190-1bffb7e6db6c-kube-api-access-z8pwb\") pod \"authentication-operator-69f744f599-jgrtb\" (UID: \"54870e81-ef64-4e07-9190-1bffb7e6db6c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.695878 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-bound-sa-token\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.712853 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.721689 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knzph\" (UniqueName: \"kubernetes.io/projected/99e7e324-afae-4256-915b-325038c897e4-kube-api-access-knzph\") pod \"openshift-apiserver-operator-796bbdcf4f-kjfwt\" (UID: \"99e7e324-afae-4256-915b-325038c897e4\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.746412 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktn5v\" (UniqueName: \"kubernetes.io/projected/df51ab2b-98e7-4c22-83ba-2bc1f70eaa07-kube-api-access-ktn5v\") pod \"packageserver-d55dfcdfc-k95qw\" (UID: \"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.760362 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.760748 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:04.26073261 +0000 UTC m=+142.443784196 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.767233 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxw5m\" (UniqueName: \"kubernetes.io/projected/bfda4547-4814-4d32-ba43-b3ffc061bf81-kube-api-access-hxw5m\") pod \"migrator-59844c95c7-b8k6z\" (UID: \"bfda4547-4814-4d32-ba43-b3ffc061bf81\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.770126 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.776586 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.779881 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ddxm\" (UniqueName: \"kubernetes.io/projected/851b2861-b400-41ae-9aae-8e041dc4e85a-kube-api-access-5ddxm\") pod \"ingress-canary-rcft8\" (UID: \"851b2861-b400-41ae-9aae-8e041dc4e85a\") " pod="openshift-ingress-canary/ingress-canary-rcft8" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.787363 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn"] Nov 26 05:28:03 crc kubenswrapper[4871]: W1126 05:28:03.801486 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e37bbf9_7c3e_431d_a8af_dd7ca13730e5.slice/crio-39aa764e81cbefd39c547a2238bfeb85c3ba62f6c219a2b584dc6cb14d17430c WatchSource:0}: Error finding container 39aa764e81cbefd39c547a2238bfeb85c3ba62f6c219a2b584dc6cb14d17430c: Status 404 returned error can't find the container with id 39aa764e81cbefd39c547a2238bfeb85c3ba62f6c219a2b584dc6cb14d17430c Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.810083 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqqwt\" (UniqueName: \"kubernetes.io/projected/680ec585-d304-48b0-9501-7af7e5bc503b-kube-api-access-cqqwt\") pod \"csi-hostpathplugin-6knjf\" (UID: \"680ec585-d304-48b0-9501-7af7e5bc503b\") " pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.817350 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79sbk\" (UniqueName: \"kubernetes.io/projected/0edbface-b4a5-4b10-bb00-a7650f2a2b77-kube-api-access-79sbk\") pod \"machine-config-operator-74547568cd-sxhc8\" (UID: \"0edbface-b4a5-4b10-bb00-a7650f2a2b77\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.852235 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p887w\" (UniqueName: \"kubernetes.io/projected/66f940f0-55f0-4d12-8376-a997d3c802cd-kube-api-access-p887w\") pod \"dns-operator-744455d44c-9sq6t\" (UID: \"66f940f0-55f0-4d12-8376-a997d3c802cd\") " pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.858706 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.859809 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxqhg\" (UniqueName: \"kubernetes.io/projected/0cfdd82f-63f7-4804-af6e-7ec8282bbc92-kube-api-access-lxqhg\") pod \"package-server-manager-789f6589d5-rwdp7\" (UID: \"0cfdd82f-63f7-4804-af6e-7ec8282bbc92\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.861638 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.861920 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:04.361901918 +0000 UTC m=+142.544953514 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.862070 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.862462 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:04.362451742 +0000 UTC m=+142.545503328 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.870264 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-rdvkb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.876377 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.881123 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.883114 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2db4\" (UniqueName: \"kubernetes.io/projected/4edc5fd4-3610-4fa0-bf22-5ee6a41f6589-kube-api-access-j2db4\") pod \"machine-api-operator-5694c8668f-wg5vb\" (UID: \"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.889034 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.901042 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbp48\" (UniqueName: \"kubernetes.io/projected/a2e2ff1f-e326-4743-8c32-e69f868ef14c-kube-api-access-cbp48\") pod \"catalog-operator-68c6474976-jj8jj\" (UID: \"a2e2ff1f-e326-4743-8c32-e69f868ef14c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.918080 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.921579 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bks9\" (UniqueName: \"kubernetes.io/projected/966673ca-eeee-4bc2-84c2-805d4f8f9648-kube-api-access-7bks9\") pod \"kube-storage-version-migrator-operator-b67b599dd-t58x9\" (UID: \"966673ca-eeee-4bc2-84c2-805d4f8f9648\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.939333 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gp6mq\" (UniqueName: \"kubernetes.io/projected/5245134d-509e-4548-af72-7c1da043b3f4-kube-api-access-gp6mq\") pod \"service-ca-operator-777779d784-sb6f7\" (UID: \"5245134d-509e-4548-af72-7c1da043b3f4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.953498 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.960065 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.961976 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bppw\" (UniqueName: \"kubernetes.io/projected/559c6fab-fdbb-495f-933a-90a3957ec82c-kube-api-access-5bppw\") pod \"olm-operator-6b444d44fb-wmtpt\" (UID: \"559c6fab-fdbb-495f-933a-90a3957ec82c\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.963169 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:03 crc kubenswrapper[4871]: E1126 05:28:03.963776 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:04.463756844 +0000 UTC m=+142.646808440 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.977928 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pz8qb"] Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.978540 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.982234 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7m4f\" (UniqueName: \"kubernetes.io/projected/9d4b702c-363c-48e1-aac0-8816682160a6-kube-api-access-t7m4f\") pod \"multus-admission-controller-857f4d67dd-f7vqf\" (UID: \"9d4b702c-363c-48e1-aac0-8816682160a6\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.986386 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z" Nov 26 05:28:03 crc kubenswrapper[4871]: I1126 05:28:03.993121 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.001001 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.003857 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4jwv\" (UniqueName: \"kubernetes.io/projected/dff5628c-810a-4f12-a683-341ebc57530a-kube-api-access-k4jwv\") pod \"etcd-operator-b45778765-sdxzx\" (UID: \"dff5628c-810a-4f12-a683-341ebc57530a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.007167 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.019641 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7jgr\" (UniqueName: \"kubernetes.io/projected/5b6bd3c0-7f03-41d8-bc87-66c374966c21-kube-api-access-h7jgr\") pod \"machine-approver-56656f9798-dmkj8\" (UID: \"5b6bd3c0-7f03-41d8-bc87-66c374966c21\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.020947 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.034741 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.040825 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.042130 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxtxk\" (UniqueName: \"kubernetes.io/projected/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-kube-api-access-fxtxk\") pod \"marketplace-operator-79b997595-ff9xx\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.053610 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rcft8" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.058715 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dw6t7\" (UniqueName: \"kubernetes.io/projected/704ebe80-008e-4369-8003-6d264aa6f6dc-kube-api-access-dw6t7\") pod \"apiserver-7bbb656c7d-2bzqb\" (UID: \"704ebe80-008e-4369-8003-6d264aa6f6dc\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.065393 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:04 crc kubenswrapper[4871]: E1126 05:28:04.065691 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-26 05:28:04.56568023 +0000 UTC m=+142.748731806 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.074074 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-6knjf" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.093855 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5dfs\" (UniqueName: \"kubernetes.io/projected/9c115df2-f210-4ff8-a6d8-ffe9d04e739c-kube-api-access-m5dfs\") pod \"machine-config-server-7n9dd\" (UID: \"9c115df2-f210-4ff8-a6d8-ffe9d04e739c\") " pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.104716 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wfvk\" (UniqueName: \"kubernetes.io/projected/244d9419-a1ed-45ac-9aca-9291a40ed9b3-kube-api-access-5wfvk\") pod \"dns-default-nntmb\" (UID: \"244d9419-a1ed-45ac-9aca-9291a40ed9b3\") " pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.119270 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtbjd\" (UniqueName: \"kubernetes.io/projected/dd4302fa-1a28-4718-b14c-f85e45519916-kube-api-access-qtbjd\") pod \"control-plane-machine-set-operator-78cbb6b69f-2pb6b\" (UID: \"dd4302fa-1a28-4718-b14c-f85e45519916\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.140366 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwn4k\" (UniqueName: \"kubernetes.io/projected/cb3f5110-df96-4946-b0a5-3439ab4e1724-kube-api-access-kwn4k\") pod \"collect-profiles-29402235-s44z5\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.166782 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:04 crc kubenswrapper[4871]: E1126 05:28:04.167143 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:04.667124945 +0000 UTC m=+142.850176531 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.178190 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9swz\" (UniqueName: \"kubernetes.io/projected/ff2fc558-90c0-467d-b6c0-b395c9b26998-kube-api-access-d9swz\") pod \"service-ca-9c57cc56f-cbq2p\" (UID: \"ff2fc558-90c0-467d-b6c0-b395c9b26998\") " pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.198029 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr2hc\" (UniqueName: \"kubernetes.io/projected/a54f432f-761c-419d-9c57-654e4f81a28f-kube-api-access-nr2hc\") pod \"router-default-5444994796-lb2sb\" (UID: \"a54f432f-761c-419d-9c57-654e4f81a28f\") " pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.210093 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.221963 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-7n9dd" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.227704 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.238155 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.243627 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.250998 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.264071 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9"] Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.266516 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.268301 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:04 crc kubenswrapper[4871]: E1126 05:28:04.268849 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:04.768834337 +0000 UTC m=+142.951885923 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.271397 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.290172 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-jgrtb"] Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.319912 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.328913 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.346205 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:04 crc kubenswrapper[4871]: W1126 05:28:04.359492 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda54f432f_761c_419d_9c57_654e4f81a28f.slice/crio-c340719eb45e423b2e0fe12d27e243ad4560e98235828593383532d30acd13dd WatchSource:0}: Error finding container c340719eb45e423b2e0fe12d27e243ad4560e98235828593383532d30acd13dd: Status 404 returned error can't find the container with id c340719eb45e423b2e0fe12d27e243ad4560e98235828593383532d30acd13dd Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.370376 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:04 crc kubenswrapper[4871]: E1126 05:28:04.370734 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:04.870705803 +0000 UTC m=+143.053757389 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.473018 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:04 crc kubenswrapper[4871]: E1126 05:28:04.473368 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:04.973356368 +0000 UTC m=+143.156407954 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.501251 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d"] Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.505760 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r"] Nov 26 05:28:04 crc kubenswrapper[4871]: W1126 05:28:04.524733 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54870e81_ef64_4e07_9190_1bffb7e6db6c.slice/crio-a34004b7dd5c7a05d9713329cfee6579c471accf9af47b8d79e014b484717464 WatchSource:0}: Error finding container a34004b7dd5c7a05d9713329cfee6579c471accf9af47b8d79e014b484717464: Status 404 returned error can't find the container with id a34004b7dd5c7a05d9713329cfee6579c471accf9af47b8d79e014b484717464 Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.577826 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:04 crc kubenswrapper[4871]: E1126 05:28:04.578135 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.078114584 +0000 UTC m=+143.261166170 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.579039 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:04 crc kubenswrapper[4871]: E1126 05:28:04.579958 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.079935959 +0000 UTC m=+143.262987545 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:04 crc kubenswrapper[4871]: W1126 05:28:04.622424 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb62a12d_ae78_45e2_b32d_126d3643260d.slice/crio-6aafcc26269f5fa7ea346db89edafec5b758151b46328182f3c1cf39d2edff71 WatchSource:0}: Error finding container 6aafcc26269f5fa7ea346db89edafec5b758151b46328182f3c1cf39d2edff71: Status 404 returned error can't find the container with id 6aafcc26269f5fa7ea346db89edafec5b758151b46328182f3c1cf39d2edff71 Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.622977 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" event={"ID":"9e37bbf9-7c3e-431d-a8af-dd7ca13730e5","Type":"ContainerStarted","Data":"9fb6301dd34e55e16122946714dbced6add8394069c6b205bb212da363e02589"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.623024 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" event={"ID":"9e37bbf9-7c3e-431d-a8af-dd7ca13730e5","Type":"ContainerStarted","Data":"39aa764e81cbefd39c547a2238bfeb85c3ba62f6c219a2b584dc6cb14d17430c"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.623035 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" event={"ID":"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153","Type":"ContainerStarted","Data":"aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.623044 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" event={"ID":"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153","Type":"ContainerStarted","Data":"f5b62bfabe3d1b438ef66fb79aa859b37e7683e3450be6f7c2f76cd69d086bcb"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.624716 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" event={"ID":"86ec5766-3ccf-487f-97bf-f0dda4f06b0e","Type":"ContainerStarted","Data":"7e91dee06efc83b6571d19ab884b1d17b16aaf4f160555be8e410a469ada9302"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.643608 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt" event={"ID":"6bece945-b45b-4d5d-aa90-23400b5267d3","Type":"ContainerStarted","Data":"67552528afa745c4825bcde49329dfa0518a579bd66199c2a78ed5fb2739f85e"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.643950 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.645605 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" 
event={"ID":"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4","Type":"ContainerStarted","Data":"203327eec0f2a4f7b47cd5e31565b5c2c32c534d4b5b5d783713c11546d2f0c6"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.645624 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" event={"ID":"ee091557-58e0-45ce-bf00-f7f2a1b2ebf4","Type":"ContainerStarted","Data":"886ea1aba3c7019c55b3eb6f602cccb6dc904368d16f2ae27a12166e27dc5fac"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.646722 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-7n9dd" event={"ID":"9c115df2-f210-4ff8-a6d8-ffe9d04e739c","Type":"ContainerStarted","Data":"0194c39c3320f134dc68a7af8d377d7d04e9dfa7e898c38a3e94327592f66103"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.647397 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" event={"ID":"5b6bd3c0-7f03-41d8-bc87-66c374966c21","Type":"ContainerStarted","Data":"5e6fa44b7628263ba335cf9a718e22cd6ae23fc78aa10962373f10ba49611487"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.663713 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp" event={"ID":"5b7f9021-d531-4dda-89c5-57eff16b24ec","Type":"ContainerStarted","Data":"d4e58d49101b13be13df78de1546ba2d0c62e4356cef4b93f998b8c5cb0955ef"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.663755 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp" event={"ID":"5b7f9021-d531-4dda-89c5-57eff16b24ec","Type":"ContainerStarted","Data":"2bc7cf400bdafc0f66f056b9ded58d0e580800352947826911f810816904d25b"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.680338 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-lb2sb" event={"ID":"a54f432f-761c-419d-9c57-654e4f81a28f","Type":"ContainerStarted","Data":"c340719eb45e423b2e0fe12d27e243ad4560e98235828593383532d30acd13dd"} Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.681794 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:04 crc kubenswrapper[4871]: E1126 05:28:04.682896 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.182874481 +0000 UTC m=+143.365926067 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.688631 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.715897 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-ml4vf" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.784574 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:04 crc kubenswrapper[4871]: E1126 05:28:04.786013 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.285995228 +0000 UTC m=+143.469046884 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.885881 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:04 crc kubenswrapper[4871]: E1126 05:28:04.886374 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.386361266 +0000 UTC m=+143.569412852 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.981509 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" podStartSLOduration=122.981495036 podStartE2EDuration="2m2.981495036s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:04.979849545 +0000 UTC m=+143.162901121" watchObservedRunningTime="2025-11-26 05:28:04.981495036 +0000 UTC m=+143.164546622" Nov 26 05:28:04 crc kubenswrapper[4871]: I1126 05:28:04.987610 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:04 crc kubenswrapper[4871]: E1126 05:28:04.988148 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.48813739 +0000 UTC m=+143.671188976 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.089486 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:05 crc kubenswrapper[4871]: E1126 05:28:05.089769 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.589738799 +0000 UTC m=+143.772790385 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.089879 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:05 crc kubenswrapper[4871]: E1126 05:28:05.090317 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.590310663 +0000 UTC m=+143.773362249 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.191619 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:05 crc kubenswrapper[4871]: E1126 05:28:05.192096 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.692060205 +0000 UTC m=+143.875111791 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.226428 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-ml4vf" podStartSLOduration=123.226409274 podStartE2EDuration="2m3.226409274s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:05.18617919 +0000 UTC m=+143.369230786" watchObservedRunningTime="2025-11-26 05:28:05.226409274 +0000 UTC m=+143.409460860" Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.294442 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:05 crc kubenswrapper[4871]: E1126 05:28:05.294729 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.79471645 +0000 UTC m=+143.977768026 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.395843 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:05 crc kubenswrapper[4871]: E1126 05:28:05.396462 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.896446372 +0000 UTC m=+144.079497958 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.397161 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-h5qx5" podStartSLOduration=123.39714147 podStartE2EDuration="2m3.39714147s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:05.390158127 +0000 UTC m=+143.573209703" watchObservedRunningTime="2025-11-26 05:28:05.39714147 +0000 UTC m=+143.580193056" Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.474120 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" podStartSLOduration=122.47410208 podStartE2EDuration="2m2.47410208s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:05.443570676 +0000 UTC m=+143.626622262" watchObservedRunningTime="2025-11-26 05:28:05.47410208 +0000 UTC m=+143.657153666" Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.498296 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:05 crc kubenswrapper[4871]: E1126 05:28:05.498643 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:05.998632636 +0000 UTC m=+144.181684222 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.550923 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt" podStartSLOduration=123.550891806 podStartE2EDuration="2m3.550891806s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:05.541005432 +0000 UTC m=+143.724057018" watchObservedRunningTime="2025-11-26 05:28:05.550891806 +0000 UTC m=+143.733943392" Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.599211 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:05 crc kubenswrapper[4871]: E1126 05:28:05.600073 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:06.099916617 +0000 UTC m=+144.282968203 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.600239 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:05 crc kubenswrapper[4871]: E1126 05:28:05.600612 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:06.100603764 +0000 UTC m=+144.283655350 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.692738 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" event={"ID":"5b6bd3c0-7f03-41d8-bc87-66c374966c21","Type":"ContainerStarted","Data":"9c5c9deef46869e8500d2434d00ab3449f35c43bbc880418ec55f05a9a5ef25b"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.696031 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" event={"ID":"86ec5766-3ccf-487f-97bf-f0dda4f06b0e","Type":"ContainerStarted","Data":"9a8bd9ec524d6d172b911cd75905952ca08aadc1705cdeb4c3d7d4c7706b7fc4"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.697449 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.701234 4871 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-pz8qb container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" start-of-body= Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.701265 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" podUID="86ec5766-3ccf-487f-97bf-f0dda4f06b0e" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.701672 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:05 crc kubenswrapper[4871]: E1126 05:28:05.701981 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:06.201966457 +0000 UTC m=+144.385018043 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.717390 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" event={"ID":"da210515-1701-4ea1-ab3c-4407b119277e","Type":"ContainerStarted","Data":"dd4f77a2c7ed340cf5743a772d6ecfdf80cb8eac4f81c161be88775238453258"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.717680 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" event={"ID":"da210515-1701-4ea1-ab3c-4407b119277e","Type":"ContainerStarted","Data":"d7f2509e5641688e43869cf1a82ce7936955202b909c1b46d64326a07a874f04"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.733238 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9" event={"ID":"eb62a12d-ae78-45e2-b32d-126d3643260d","Type":"ContainerStarted","Data":"9dfa5c6d5cdcdf89fd16c583a53d066523f8dfdfd72f323ad7d573d3d2d45338"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.733279 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9" event={"ID":"eb62a12d-ae78-45e2-b32d-126d3643260d","Type":"ContainerStarted","Data":"6aafcc26269f5fa7ea346db89edafec5b758151b46328182f3c1cf39d2edff71"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.737828 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d" event={"ID":"ba736a92-4399-4f3b-bcc9-fa7a6b30f953","Type":"ContainerStarted","Data":"f97c8bb509c8bb6889859c766489034935b2fcb9c752e128c3b9f0a385e28b53"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.737862 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d" event={"ID":"ba736a92-4399-4f3b-bcc9-fa7a6b30f953","Type":"ContainerStarted","Data":"fd7f9b4e6937375e000a3a46dc5a1c22b2c25aac02fa4f49abe2fa0521a427aa"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.739031 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-lb2sb" event={"ID":"a54f432f-761c-419d-9c57-654e4f81a28f","Type":"ContainerStarted","Data":"e5ceb62d0c3a25dd38876b671f40c2ccc6c8bf56e41accd6a03f1a1542bb677e"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.740026 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-7n9dd" event={"ID":"9c115df2-f210-4ff8-a6d8-ffe9d04e739c","Type":"ContainerStarted","Data":"4aeff83a2996a26f19c64592db4397bc2f4bfea7a11f3631166dab9b3d51b7f8"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.743874 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" 
event={"ID":"54870e81-ef64-4e07-9190-1bffb7e6db6c","Type":"ContainerStarted","Data":"b6c6bd632da52c2e7520c06f0d8922fee6bc4847f06637c9573e3a9767a58bfa"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.743909 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.743931 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" event={"ID":"54870e81-ef64-4e07-9190-1bffb7e6db6c","Type":"ContainerStarted","Data":"a34004b7dd5c7a05d9713329cfee6579c471accf9af47b8d79e014b484717464"} Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.749261 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.803940 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:05 crc kubenswrapper[4871]: E1126 05:28:05.804872 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:06.304855128 +0000 UTC m=+144.487906814 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.827775 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" podStartSLOduration=123.827757083 podStartE2EDuration="2m3.827757083s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:05.825032716 +0000 UTC m=+144.008084302" watchObservedRunningTime="2025-11-26 05:28:05.827757083 +0000 UTC m=+144.010808679" Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.854347 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-kb6mv" podStartSLOduration=123.854328469 podStartE2EDuration="2m3.854328469s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:05.853327815 +0000 UTC m=+144.036379401" watchObservedRunningTime="2025-11-26 05:28:05.854328469 +0000 UTC m=+144.037380065" Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.895107 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zhpbn" podStartSLOduration=122.895093686 podStartE2EDuration="2m2.895093686s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:05.892723447 +0000 UTC m=+144.075775033" watchObservedRunningTime="2025-11-26 05:28:05.895093686 +0000 UTC m=+144.078145272" Nov 26 05:28:05 crc kubenswrapper[4871]: I1126 05:28:05.906403 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:05 crc kubenswrapper[4871]: E1126 05:28:05.907487 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:06.407474242 +0000 UTC m=+144.590525828 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.008383 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:06 crc kubenswrapper[4871]: E1126 05:28:06.008779 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:06.508767193 +0000 UTC m=+144.691818779 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.089144 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-zb8wp" podStartSLOduration=124.089130098 podStartE2EDuration="2m4.089130098s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:06.088194034 +0000 UTC m=+144.271245620" watchObservedRunningTime="2025-11-26 05:28:06.089130098 +0000 UTC m=+144.272181684" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.109590 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:06 crc kubenswrapper[4871]: E1126 05:28:06.110012 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:06.609996943 +0000 UTC m=+144.793048529 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.197663 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-jgrtb" podStartSLOduration=124.197644117 podStartE2EDuration="2m4.197644117s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:06.195168306 +0000 UTC m=+144.378219892" watchObservedRunningTime="2025-11-26 05:28:06.197644117 +0000 UTC m=+144.380695703" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.211676 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:06 crc kubenswrapper[4871]: E1126 05:28:06.211990 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:06.711979741 +0000 UTC m=+144.895031327 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.229250 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.250143 4871 patch_prober.go:28] interesting pod/router-default-5444994796-lb2sb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 05:28:06 crc kubenswrapper[4871]: [-]has-synced failed: reason withheld Nov 26 05:28:06 crc kubenswrapper[4871]: [+]process-running ok Nov 26 05:28:06 crc kubenswrapper[4871]: healthz check failed Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.250187 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lb2sb" podUID="a54f432f-761c-419d-9c57-654e4f81a28f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.271492 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-lb2sb" podStartSLOduration=123.27147399 podStartE2EDuration="2m3.27147399s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:06.268866166 +0000 UTC m=+144.451917752" watchObservedRunningTime="2025-11-26 05:28:06.27147399 +0000 UTC m=+144.454525576" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.295862 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-vwk2r" podStartSLOduration=123.295846312 podStartE2EDuration="2m3.295846312s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:06.293684729 +0000 UTC m=+144.476736315" watchObservedRunningTime="2025-11-26 05:28:06.295846312 +0000 UTC m=+144.478897898" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.315001 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:06 crc kubenswrapper[4871]: E1126 05:28:06.315283 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:06.815270112 +0000 UTC m=+144.998321688 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.364072 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" podStartSLOduration=124.364054877 podStartE2EDuration="2m4.364054877s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:06.358741715 +0000 UTC m=+144.541793301" watchObservedRunningTime="2025-11-26 05:28:06.364054877 +0000 UTC m=+144.547106463" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.391802 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-n27d9" podStartSLOduration=124.391785291 podStartE2EDuration="2m4.391785291s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:06.391131435 +0000 UTC m=+144.574183021" watchObservedRunningTime="2025-11-26 05:28:06.391785291 +0000 UTC m=+144.574836877" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.413550 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-h6n6d" podStartSLOduration=123.413515828 podStartE2EDuration="2m3.413515828s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:06.413336514 +0000 UTC m=+144.596388100" watchObservedRunningTime="2025-11-26 05:28:06.413515828 +0000 UTC m=+144.596567414" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.415881 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:06 crc kubenswrapper[4871]: E1126 05:28:06.416148 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:06.916138153 +0000 UTC m=+145.099189739 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.455405 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-7n9dd" podStartSLOduration=5.455390722 podStartE2EDuration="5.455390722s" podCreationTimestamp="2025-11-26 05:28:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:06.454654954 +0000 UTC m=+144.637706540" watchObservedRunningTime="2025-11-26 05:28:06.455390722 +0000 UTC m=+144.638442308" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.516867 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:06 crc kubenswrapper[4871]: E1126 05:28:06.517197 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:07.017179198 +0000 UTC m=+145.200230784 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.597923 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-wg5vb"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.618681 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:06 crc kubenswrapper[4871]: E1126 05:28:06.619024 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:07.119008902 +0000 UTC m=+145.302060478 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.624749 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.653559 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.656269 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9sq6t"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.690818 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-f7vqf"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.701288 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.703247 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f"] Nov 26 05:28:06 crc kubenswrapper[4871]: W1126 05:28:06.712716 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66f940f0_55f0_4d12_8376_a997d3c802cd.slice/crio-452b986997da64f524112eb279e8c85b9db3d598d23f5eb936b44018b350ffe3 WatchSource:0}: Error finding container 452b986997da64f524112eb279e8c85b9db3d598d23f5eb936b44018b350ffe3: Status 404 returned error can't find the container with id 452b986997da64f524112eb279e8c85b9db3d598d23f5eb936b44018b350ffe3 Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.719309 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:06 crc kubenswrapper[4871]: E1126 05:28:06.719813 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:07.219785871 +0000 UTC m=+145.402837457 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.720886 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.732944 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-6knjf"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.789340 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.797443 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" event={"ID":"0edbface-b4a5-4b10-bb00-a7650f2a2b77","Type":"ContainerStarted","Data":"2748602bbc82a22a87f29e39b892218bb3fffb17dc123986762f72054a89dbee"} Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.816706 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-nntmb"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.822978 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:06 crc kubenswrapper[4871]: E1126 05:28:06.823452 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:07.323437221 +0000 UTC m=+145.506488807 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.824600 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-sdxzx"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.827403 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.832230 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.836585 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ff9xx"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.837490 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" event={"ID":"5b6bd3c0-7f03-41d8-bc87-66c374966c21","Type":"ContainerStarted","Data":"d3fbe7a5af7d5d7ed29a3fec7fee94bb924d596a30759d98e7f157aea800722d"} Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.841939 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" event={"ID":"9d4b702c-363c-48e1-aac0-8816682160a6","Type":"ContainerStarted","Data":"dd541ea47b39a860eaa9d364af0d48f0d80bee67dff2db4b5f4f3fb50ac529ed"} Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.842562 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.846306 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" event={"ID":"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589","Type":"ContainerStarted","Data":"f5e83f7a1318b95d2d048b86728fcd15f91c4944179b3b855529317ea328c358"} Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.847782 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t" event={"ID":"66f940f0-55f0-4d12-8376-a997d3c802cd","Type":"ContainerStarted","Data":"452b986997da64f524112eb279e8c85b9db3d598d23f5eb936b44018b350ffe3"} Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.848172 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.849348 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" event={"ID":"0cfdd82f-63f7-4804-af6e-7ec8282bbc92","Type":"ContainerStarted","Data":"90102f8f3f78dddb0c367b24fb02f2b048e14f013415bc2a63e6e4e516d1f7e7"} Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.880130 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-rdvkb"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.883165 4871 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dmkj8" podStartSLOduration=124.883115984 podStartE2EDuration="2m4.883115984s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:06.878419718 +0000 UTC m=+145.061471304" watchObservedRunningTime="2025-11-26 05:28:06.883115984 +0000 UTC m=+145.066167570" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.909366 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z" event={"ID":"bfda4547-4814-4d32-ba43-b3ffc061bf81","Type":"ContainerStarted","Data":"fb0e8b84861d00114f4575c8ce65d6c9dde6f16eb2ba664b3de6e45ea2dd762f"} Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.923686 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6zcbt" Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.923986 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:06 crc kubenswrapper[4871]: E1126 05:28:06.924610 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:07.424595509 +0000 UTC m=+145.607647085 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.956266 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9"] Nov 26 05:28:06 crc kubenswrapper[4871]: I1126 05:28:06.970199 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt"] Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.028716 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.030567 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw"] Nov 26 05:28:07 crc kubenswrapper[4871]: E1126 05:28:07.032193 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:07.532174585 +0000 UTC m=+145.715226171 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.032498 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-cbq2p"] Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.042640 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7"] Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.042684 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rcft8"] Nov 26 05:28:07 crc kubenswrapper[4871]: W1126 05:28:07.104344 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb3f5110_df96_4946_b0a5_3439ab4e1724.slice/crio-6a185f561958ca7130e1e1cd5db504fcff6a6d19b609555caa7f6aaca8819a21 WatchSource:0}: Error finding container 6a185f561958ca7130e1e1cd5db504fcff6a6d19b609555caa7f6aaca8819a21: Status 404 returned error can't find the container with id 6a185f561958ca7130e1e1cd5db504fcff6a6d19b609555caa7f6aaca8819a21 Nov 26 05:28:07 crc kubenswrapper[4871]: W1126 05:28:07.104922 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod559c6fab_fdbb_495f_933a_90a3957ec82c.slice/crio-1f71312c5886877da21e69ff563e03960ae277c7c34d477028b53e339fd50f00 WatchSource:0}: Error finding container 1f71312c5886877da21e69ff563e03960ae277c7c34d477028b53e339fd50f00: Status 404 returned error can't find the container with id 1f71312c5886877da21e69ff563e03960ae277c7c34d477028b53e339fd50f00 Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.129966 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:07 crc kubenswrapper[4871]: E1126 05:28:07.130230 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:07.630194205 +0000 UTC m=+145.813245791 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.130372 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:07 crc kubenswrapper[4871]: E1126 05:28:07.130898 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:07.630877931 +0000 UTC m=+145.813929577 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:07 crc kubenswrapper[4871]: W1126 05:28:07.194341 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod704ebe80_008e_4369_8003_6d264aa6f6dc.slice/crio-628c12c78d0e7786ff987be2d875414a6776d799244c92f70839cee6259e46cb WatchSource:0}: Error finding container 628c12c78d0e7786ff987be2d875414a6776d799244c92f70839cee6259e46cb: Status 404 returned error can't find the container with id 628c12c78d0e7786ff987be2d875414a6776d799244c92f70839cee6259e46cb Nov 26 05:28:07 crc kubenswrapper[4871]: W1126 05:28:07.208568 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf51ab2b_98e7_4c22_83ba_2bc1f70eaa07.slice/crio-56d3cab82b032da7301158fa5685c213a84c7f483aa8e4689c923770ac05014e WatchSource:0}: Error finding container 56d3cab82b032da7301158fa5685c213a84c7f483aa8e4689c923770ac05014e: Status 404 returned error can't find the container with id 56d3cab82b032da7301158fa5685c213a84c7f483aa8e4689c923770ac05014e Nov 26 05:28:07 crc kubenswrapper[4871]: W1126 05:28:07.213846 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod851b2861_b400_41ae_9aae_8e041dc4e85a.slice/crio-27540eb21cffbb977dd5523d741d5351706f831a31f2b3f5a4a9970cbd6dbc96 WatchSource:0}: Error finding container 27540eb21cffbb977dd5523d741d5351706f831a31f2b3f5a4a9970cbd6dbc96: Status 404 returned error can't find the container with id 27540eb21cffbb977dd5523d741d5351706f831a31f2b3f5a4a9970cbd6dbc96 Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.239007 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:07 crc kubenswrapper[4871]: E1126 05:28:07.239456 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:07.739439822 +0000 UTC m=+145.922491398 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.247778 4871 patch_prober.go:28] interesting pod/router-default-5444994796-lb2sb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 05:28:07 crc kubenswrapper[4871]: [-]has-synced failed: reason withheld Nov 26 05:28:07 crc kubenswrapper[4871]: [+]process-running ok Nov 26 05:28:07 crc kubenswrapper[4871]: healthz check failed Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.247813 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lb2sb" podUID="a54f432f-761c-419d-9c57-654e4f81a28f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.348865 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:07 crc kubenswrapper[4871]: E1126 05:28:07.349304 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:07.849287155 +0000 UTC m=+146.032338741 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.451297 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:07 crc kubenswrapper[4871]: E1126 05:28:07.452001 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:07.951986851 +0000 UTC m=+146.135038437 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.553004 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:07 crc kubenswrapper[4871]: E1126 05:28:07.553325 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:08.053312253 +0000 UTC m=+146.236363839 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.654293 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:07 crc kubenswrapper[4871]: E1126 05:28:07.655018 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:08.155004004 +0000 UTC m=+146.338055590 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.706760 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.706813 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.712329 4871 patch_prober.go:28] interesting pod/apiserver-76f77b778f-ql4w4 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 26 05:28:07 crc kubenswrapper[4871]: [+]log ok Nov 26 05:28:07 crc kubenswrapper[4871]: [+]etcd ok Nov 26 05:28:07 crc kubenswrapper[4871]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 26 05:28:07 crc kubenswrapper[4871]: [+]poststarthook/generic-apiserver-start-informers ok Nov 26 05:28:07 crc kubenswrapper[4871]: [+]poststarthook/max-in-flight-filter ok Nov 26 05:28:07 crc kubenswrapper[4871]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 26 05:28:07 crc kubenswrapper[4871]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 26 05:28:07 crc kubenswrapper[4871]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 26 05:28:07 crc kubenswrapper[4871]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Nov 26 05:28:07 crc kubenswrapper[4871]: [+]poststarthook/project.openshift.io-projectcache ok Nov 26 05:28:07 crc kubenswrapper[4871]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 26 05:28:07 crc kubenswrapper[4871]: [+]poststarthook/openshift.io-startinformers ok Nov 26 05:28:07 crc kubenswrapper[4871]: 
[+]poststarthook/openshift.io-restmapperupdater ok Nov 26 05:28:07 crc kubenswrapper[4871]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 26 05:28:07 crc kubenswrapper[4871]: livez check failed Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.712395 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" podUID="ee091557-58e0-45ce-bf00-f7f2a1b2ebf4" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.756085 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:07 crc kubenswrapper[4871]: E1126 05:28:07.757695 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:08.257669849 +0000 UTC m=+146.440721435 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.806309 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.860340 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:07 crc kubenswrapper[4871]: E1126 05:28:07.862056 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:08.362037327 +0000 UTC m=+146.545088913 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.932808 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" event={"ID":"9d4b702c-363c-48e1-aac0-8816682160a6","Type":"ContainerStarted","Data":"aa296eac82b75a660acc221d2ac50e5aa1fd220e827f72ca97c8dde6813ff826"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.935969 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" event={"ID":"e028ebf2-4eb3-477a-be5d-ce02dd655d8d","Type":"ContainerStarted","Data":"265eeea807e3528d8e00c9223297625c6daca57569ac5ab9ba5e47203a916332"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.947889 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t" event={"ID":"66f940f0-55f0-4d12-8376-a997d3c802cd","Type":"ContainerStarted","Data":"df17c1be4f9903c8aff000bb305dbf169054525e899ebe8ebddbaacf5a296db9"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.949296 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" event={"ID":"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07","Type":"ContainerStarted","Data":"56d3cab82b032da7301158fa5685c213a84c7f483aa8e4689c923770ac05014e"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.950724 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" event={"ID":"5245134d-509e-4548-af72-7c1da043b3f4","Type":"ContainerStarted","Data":"1184e8c0b01c52a03a6c2acf52d2474d35461344170b792009e434990484e5f1"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.951970 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc" event={"ID":"7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42","Type":"ContainerStarted","Data":"d821886e8a5245909532728f8a27818194bd1e9e6e14a5c9988455979d6daa02"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.953376 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" event={"ID":"0edbface-b4a5-4b10-bb00-a7650f2a2b77","Type":"ContainerStarted","Data":"d1a9f81e3932ff6dfa4c8603a6154bd6c6f629afe5fb00f420bf3ade17eae194"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.954940 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" event={"ID":"140ee3ed-e8e4-42c9-b520-8be36158fd66","Type":"ContainerStarted","Data":"1790af997c227f670e4d91382b9f8947dd374a2bde93a332d891203a7352db57"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.954966 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" event={"ID":"140ee3ed-e8e4-42c9-b520-8be36158fd66","Type":"ContainerStarted","Data":"6f9dd6a68fea5f7ed0c36ae06ca62e6d75eca399c5da20d061bef8de70a846cd"} Nov 26 05:28:07 crc 
kubenswrapper[4871]: I1126 05:28:07.956404 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-rdvkb" event={"ID":"317fac77-edf3-46a5-9635-1dd8bb83fea6","Type":"ContainerStarted","Data":"21fb780f21d1913e55911ac35f14ace3af7fa8087f4b6e30471e57b04f8b23ff"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.962763 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:07 crc kubenswrapper[4871]: E1126 05:28:07.963104 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:08.463092532 +0000 UTC m=+146.646144118 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.972324 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" event={"ID":"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589","Type":"ContainerStarted","Data":"bca66fd6647e7c89b913b999bbba18368787d14fcc394a76ed6bb54f7918a97d"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.975636 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-nntmb" event={"ID":"244d9419-a1ed-45ac-9aca-9291a40ed9b3","Type":"ContainerStarted","Data":"50826cbc4b540ab0cabc6998164789455299e1773f014abbf903ca767cba07b7"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.977684 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt" event={"ID":"99e7e324-afae-4256-915b-325038c897e4","Type":"ContainerStarted","Data":"ff6ec3f2a7851bd6890771bcafd9b501be27c5cc1d6282ab69435c1286aa1205"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.981880 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" event={"ID":"559c6fab-fdbb-495f-933a-90a3957ec82c","Type":"ContainerStarted","Data":"1f71312c5886877da21e69ff563e03960ae277c7c34d477028b53e339fd50f00"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.987195 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b" event={"ID":"dd4302fa-1a28-4718-b14c-f85e45519916","Type":"ContainerStarted","Data":"a74df9616f1afafe836b2d0fe9251b4ec3b5afb8eb89cc83f61b2d1189bbafb6"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.989162 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" 
event={"ID":"cb3f5110-df96-4946-b0a5-3439ab4e1724","Type":"ContainerStarted","Data":"2b65b3ae87375dbf86af9555f34774ddda3de3391ea7d761ffeecaebf56ed651"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.989188 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" event={"ID":"cb3f5110-df96-4946-b0a5-3439ab4e1724","Type":"ContainerStarted","Data":"6a185f561958ca7130e1e1cd5db504fcff6a6d19b609555caa7f6aaca8819a21"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.991618 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-6knjf" event={"ID":"680ec585-d304-48b0-9501-7af7e5bc503b","Type":"ContainerStarted","Data":"03e3339fe26775241d34b5acdf62b2d1de7e9b11f0a215af74dc785602f605c5"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.993012 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" event={"ID":"966673ca-eeee-4bc2-84c2-805d4f8f9648","Type":"ContainerStarted","Data":"4f489dfc50b10d3c7acbceefc30c5ad0691f30fe9d278e2a6b828b740384512e"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.994172 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" event={"ID":"ff2fc558-90c0-467d-b6c0-b395c9b26998","Type":"ContainerStarted","Data":"998324252fb333620ccb088e0a5186459b10a32293d5b7f95647b021834a8090"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.995012 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" event={"ID":"704ebe80-008e-4369-8003-6d264aa6f6dc","Type":"ContainerStarted","Data":"628c12c78d0e7786ff987be2d875414a6776d799244c92f70839cee6259e46cb"} Nov 26 05:28:07 crc kubenswrapper[4871]: I1126 05:28:07.999668 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z" event={"ID":"bfda4547-4814-4d32-ba43-b3ffc061bf81","Type":"ContainerStarted","Data":"c4aef15ab4b795258ee583bfce65752e5e4a3e46e8f90233c162c29faf939603"} Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.008255 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" podStartSLOduration=126.008239007 podStartE2EDuration="2m6.008239007s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:08.006826982 +0000 UTC m=+146.189878568" watchObservedRunningTime="2025-11-26 05:28:08.008239007 +0000 UTC m=+146.191290593" Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.010690 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" event={"ID":"0cfdd82f-63f7-4804-af6e-7ec8282bbc92","Type":"ContainerStarted","Data":"647cd4b32c8f3b22144cb72ae9bf13d8835168d17bcaa8e2cc9ab34615158f4c"} Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.018719 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" event={"ID":"a2e2ff1f-e326-4743-8c32-e69f868ef14c","Type":"ContainerStarted","Data":"26366225c624559d4d9e83895f418d92417e57bbbf77e1e63f65f22431ae5bfc"} Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 
05:28:08.018769 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" event={"ID":"a2e2ff1f-e326-4743-8c32-e69f868ef14c","Type":"ContainerStarted","Data":"4b8cec40660338375cb996a1b12f0708aa4a1b7c7be74e1705d5db93e692a51c"} Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.019310 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.020491 4871 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-jj8jj container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body= Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.020550 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" podUID="a2e2ff1f-e326-4743-8c32-e69f868ef14c" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.025244 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rcft8" event={"ID":"851b2861-b400-41ae-9aae-8e041dc4e85a","Type":"ContainerStarted","Data":"27540eb21cffbb977dd5523d741d5351706f831a31f2b3f5a4a9970cbd6dbc96"} Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.026907 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" event={"ID":"dff5628c-810a-4f12-a683-341ebc57530a","Type":"ContainerStarted","Data":"fa41b9c12faa79e29950b408c822c14d02f42298db69ddb1b8813ffaf95bb768"} Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.036623 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj" podStartSLOduration=125.036605327 podStartE2EDuration="2m5.036605327s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:08.034577637 +0000 UTC m=+146.217629223" watchObservedRunningTime="2025-11-26 05:28:08.036605327 +0000 UTC m=+146.219656913" Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.064026 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.064380 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:08.564363833 +0000 UTC m=+146.747415419 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.064819 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.066679 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:08.56667082 +0000 UTC m=+146.749722406 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.166131 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.167327 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:08.667306275 +0000 UTC m=+146.850357871 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.233211 4871 patch_prober.go:28] interesting pod/router-default-5444994796-lb2sb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 05:28:08 crc kubenswrapper[4871]: [-]has-synced failed: reason withheld
Nov 26 05:28:08 crc kubenswrapper[4871]: [+]process-running ok
Nov 26 05:28:08 crc kubenswrapper[4871]: healthz check failed
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.233492 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lb2sb" podUID="a54f432f-761c-419d-9c57-654e4f81a28f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.268759 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.269200 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:08.769185461 +0000 UTC m=+146.952237047 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.369834 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.370277 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:08.870261307 +0000 UTC m=+147.053312893 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.471009 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.471419 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:08.971408165 +0000 UTC m=+147.154459751 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.572026 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.572272 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.072247985 +0000 UTC m=+147.255299581 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.572643 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.573116 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.073105586 +0000 UTC m=+147.256157172 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.674581 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.675314 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.175296549 +0000 UTC m=+147.358348135 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.776344 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.776720 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.276707804 +0000 UTC m=+147.459759380 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.878113 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.878294 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.378266082 +0000 UTC m=+147.561317668 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.878614 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.878974 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.378959989 +0000 UTC m=+147.562011575 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.979847 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.980054 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.480024744 +0000 UTC m=+147.663076340 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:08 crc kubenswrapper[4871]: I1126 05:28:08.980112 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:08 crc kubenswrapper[4871]: E1126 05:28:08.980732 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.480721112 +0000 UTC m=+147.663772698 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.032494 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" event={"ID":"df51ab2b-98e7-4c22-83ba-2bc1f70eaa07","Type":"ContainerStarted","Data":"2e5ce8551f4db729371270b3985e01b2c4f5b18d5afbef27600bf1137dcd37b9"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.032983 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.033931 4871 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-k95qw container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:5443/healthz\": dial tcp 10.217.0.39:5443: connect: connection refused" start-of-body=
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.034083 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" podUID="df51ab2b-98e7-4c22-83ba-2bc1f70eaa07" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.39:5443/healthz\": dial tcp 10.217.0.39:5443: connect: connection refused"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.035069 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" event={"ID":"ff2fc558-90c0-467d-b6c0-b395c9b26998","Type":"ContainerStarted","Data":"6f1437ccbd9a752dbf46d6941ecc7c3c238c7e98be046ba8523c6038a304b8d7"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.037703 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" event={"ID":"9d4b702c-363c-48e1-aac0-8816682160a6","Type":"ContainerStarted","Data":"1b9bf84bb3794fb43eb55cb1267b843faccb118a03513dd6f7ba4bf0c80440f3"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.040097 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t" event={"ID":"66f940f0-55f0-4d12-8376-a997d3c802cd","Type":"ContainerStarted","Data":"e1f91a102c3b958840560c5e51bc8f1187b8d6b28444a30c7e4307a357cd6b42"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.041925 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" event={"ID":"e028ebf2-4eb3-477a-be5d-ce02dd655d8d","Type":"ContainerStarted","Data":"cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.042383 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.043430 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt" event={"ID":"99e7e324-afae-4256-915b-325038c897e4","Type":"ContainerStarted","Data":"da14e86e19572858b89e6e569da868dcf8c204e33e60ee453b96a20b98849a7c"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.043558 4871 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-ff9xx container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body=
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.043593 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" podUID="e028ebf2-4eb3-477a-be5d-ce02dd655d8d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.044845 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-rdvkb" event={"ID":"317fac77-edf3-46a5-9635-1dd8bb83fea6","Type":"ContainerStarted","Data":"6fe2acd2ce41f86d9a0bd1980bdc6379184268abc0024f446c874dfcd2c44323"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.045878 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" event={"ID":"5245134d-509e-4548-af72-7c1da043b3f4","Type":"ContainerStarted","Data":"66032b1b02982b09efc1e90405a740d8b7b11dab3224a6af5a717665a12dec67"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.048005 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b" event={"ID":"dd4302fa-1a28-4718-b14c-f85e45519916","Type":"ContainerStarted","Data":"2bd915d7b2caa1bfdebba80510d7e7690605d1413a1028c29532ac5661bb5808"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.049781 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-6knjf" event={"ID":"680ec585-d304-48b0-9501-7af7e5bc503b","Type":"ContainerStarted","Data":"ff58f0a77d7b382be5740f41bbc470fe0d5ef9b9e0589428c725a30425e35beb"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.051349 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc" event={"ID":"7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42","Type":"ContainerStarted","Data":"f35961cc9bc29a73e3f8207ce3ae386be99a863855883fce5ceb8622aa831984"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.051566 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc" event={"ID":"7a3bd369-36ee-4b9a-a8dd-9bbfeca9cd42","Type":"ContainerStarted","Data":"76d0ebb669fc225f972360164ebce26de312e3e9b20eba809e40bf4e22195c11"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.053622 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" event={"ID":"140ee3ed-e8e4-42c9-b520-8be36158fd66","Type":"ContainerStarted","Data":"1dc4ba864df94ff6cb15f62e94c417456ddcb46681c71e2ac033c30cf92e4b0e"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.055557 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" event={"ID":"0cfdd82f-63f7-4804-af6e-7ec8282bbc92","Type":"ContainerStarted","Data":"d3cda5df5c9bf9b52257c4fc6e07dbe7e497027d66b2bffa2d751aa9c5247df0"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.055906 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.057301 4871 generic.go:334] "Generic (PLEG): container finished" podID="704ebe80-008e-4369-8003-6d264aa6f6dc" containerID="44dda9e291aaa98bfc81d2c2c9fa0b89f4a4681091431c87f3bc81f38594e277" exitCode=0
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.057372 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" event={"ID":"704ebe80-008e-4369-8003-6d264aa6f6dc","Type":"ContainerDied","Data":"44dda9e291aaa98bfc81d2c2c9fa0b89f4a4681091431c87f3bc81f38594e277"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.058964 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw" podStartSLOduration=126.058945683 podStartE2EDuration="2m6.058945683s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.056498983 +0000 UTC m=+147.239550569" watchObservedRunningTime="2025-11-26 05:28:09.058945683 +0000 UTC m=+147.241997279"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.062307 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rcft8" event={"ID":"851b2861-b400-41ae-9aae-8e041dc4e85a","Type":"ContainerStarted","Data":"deab7c5848fa6350d8d71e86b53560907ef3f61a0b6cfac1a0d1e344d3764f57"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.065303 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" event={"ID":"dff5628c-810a-4f12-a683-341ebc57530a","Type":"ContainerStarted","Data":"6de3a2a5777e094d89001c6ec25a5229fafeffca2f4fa61b387af8aa983b6968"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.077106 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qx4hc" podStartSLOduration=126.077087711 podStartE2EDuration="2m6.077087711s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.076098017 +0000 UTC m=+147.259149603" watchObservedRunningTime="2025-11-26 05:28:09.077087711 +0000 UTC m=+147.260139297"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.081220 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:09 crc kubenswrapper[4871]: E1126 05:28:09.087792 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.587757905 +0000 UTC m=+147.770809501 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.088081 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" event={"ID":"4edc5fd4-3610-4fa0-bf22-5ee6a41f6589","Type":"ContainerStarted","Data":"4c04631e2b611f482553f70d220e79c034ee2f3f32c828654c2d8f4f1f0903af"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.102353 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" event={"ID":"0edbface-b4a5-4b10-bb00-a7650f2a2b77","Type":"ContainerStarted","Data":"3e9bc8c31779b79624a0f3b05cde482ca095f3a1d72d30bf297d47769af30d33"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.117212 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" podStartSLOduration=126.117191902 podStartE2EDuration="2m6.117191902s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.115260854 +0000 UTC m=+147.298312440" watchObservedRunningTime="2025-11-26 05:28:09.117191902 +0000 UTC m=+147.300243488"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.120934 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" event={"ID":"559c6fab-fdbb-495f-933a-90a3957ec82c","Type":"ContainerStarted","Data":"580168ab76e25b413e20c1a8223b6e469abfc4edca78a0c58fbfb13f3c61a439"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.121423 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.125776 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" event={"ID":"966673ca-eeee-4bc2-84c2-805d4f8f9648","Type":"ContainerStarted","Data":"38b1bdbdcfe6ae22dc1e65774c086b2cfdf9ae17389bb58732039f5f423e90cf"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.127485 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-nntmb" event={"ID":"244d9419-a1ed-45ac-9aca-9291a40ed9b3","Type":"ContainerStarted","Data":"3faa1e2ace1d874cde771087539fe1585760e4df2ca0344c4c1ec5f71e1d82eb"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.127512 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-nntmb" event={"ID":"244d9419-a1ed-45ac-9aca-9291a40ed9b3","Type":"ContainerStarted","Data":"15d2fb34102e7b498f1ce078c915431f213b1f811eeccd6e3ca9035a169abb3d"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.127850 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-nntmb"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.137808 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.142265 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z" event={"ID":"bfda4547-4814-4d32-ba43-b3ffc061bf81","Type":"ContainerStarted","Data":"b8551e6ab04c40de8a6070f190d2d39b4976852375780450a47ba2d2aebdf8c2"}
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.148271 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-cbq2p" podStartSLOduration=126.148251939 podStartE2EDuration="2m6.148251939s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.14790031 +0000 UTC m=+147.330951896" watchObservedRunningTime="2025-11-26 05:28:09.148251939 +0000 UTC m=+147.331303535"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.150050 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jj8jj"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.188422 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:09 crc kubenswrapper[4871]: E1126 05:28:09.189550 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.689537148 +0000 UTC m=+147.872588734 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.214662 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-f7vqf" podStartSLOduration=126.214646158 podStartE2EDuration="2m6.214646158s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.175511862 +0000 UTC m=+147.358563448" watchObservedRunningTime="2025-11-26 05:28:09.214646158 +0000 UTC m=+147.397697744"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.239753 4871 patch_prober.go:28] interesting pod/router-default-5444994796-lb2sb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 05:28:09 crc kubenswrapper[4871]: [-]has-synced failed: reason withheld
Nov 26 05:28:09 crc kubenswrapper[4871]: [+]process-running ok
Nov 26 05:28:09 crc kubenswrapper[4871]: healthz check failed
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.241468 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lb2sb" podUID="a54f432f-761c-419d-9c57-654e4f81a28f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.242404 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-rdvkb" podStartSLOduration=127.242390833 podStartE2EDuration="2m7.242390833s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.215308874 +0000 UTC m=+147.398360450" watchObservedRunningTime="2025-11-26 05:28:09.242390833 +0000 UTC m=+147.425442429"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.244806 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-pkt8f" podStartSLOduration=127.244794773 podStartE2EDuration="2m7.244794773s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.24388951 +0000 UTC m=+147.426941096" watchObservedRunningTime="2025-11-26 05:28:09.244794773 +0000 UTC m=+147.427846359"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.270356 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-2pb6b" podStartSLOduration=126.270336313 podStartE2EDuration="2m6.270336313s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.267678718 +0000 UTC m=+147.450730314" watchObservedRunningTime="2025-11-26 05:28:09.270336313 +0000 UTC m=+147.453387889"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.290347 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:09 crc kubenswrapper[4871]: E1126 05:28:09.290742 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.790711387 +0000 UTC m=+147.973762973 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.294957 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-kjfwt" podStartSLOduration=127.294938261 podStartE2EDuration="2m7.294938261s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.29447949 +0000 UTC m=+147.477531076" watchObservedRunningTime="2025-11-26 05:28:09.294938261 +0000 UTC m=+147.477989847"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.322793 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-sb6f7" podStartSLOduration=126.322778088 podStartE2EDuration="2m6.322778088s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.320879561 +0000 UTC m=+147.503931147" watchObservedRunningTime="2025-11-26 05:28:09.322778088 +0000 UTC m=+147.505829674"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.394264 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:09 crc kubenswrapper[4871]: E1126 05:28:09.394943 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.89492741 +0000 UTC m=+148.077979006 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.424514 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-9sq6t" podStartSLOduration=127.42449666 podStartE2EDuration="2m7.42449666s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.424321816 +0000 UTC m=+147.607373412" watchObservedRunningTime="2025-11-26 05:28:09.42449666 +0000 UTC m=+147.607548246"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.425502 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" podStartSLOduration=126.425496945 podStartE2EDuration="2m6.425496945s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.378026403 +0000 UTC m=+147.561077989" watchObservedRunningTime="2025-11-26 05:28:09.425496945 +0000 UTC m=+147.608548531"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.495329 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:09 crc kubenswrapper[4871]: E1126 05:28:09.495662 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:09.995645957 +0000 UTC m=+148.178697543 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.502616 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-wmtpt" podStartSLOduration=126.502600789 podStartE2EDuration="2m6.502600789s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.50103246 +0000 UTC m=+147.684084046" watchObservedRunningTime="2025-11-26 05:28:09.502600789 +0000 UTC m=+147.685652375"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.538695 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-nntmb" podStartSLOduration=8.53868008 podStartE2EDuration="8.53868008s" podCreationTimestamp="2025-11-26 05:28:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.536881736 +0000 UTC m=+147.719933312" watchObservedRunningTime="2025-11-26 05:28:09.53868008 +0000 UTC m=+147.721731666"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.568473 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-sxhc8" podStartSLOduration=126.568452545 podStartE2EDuration="2m6.568452545s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.564405815 +0000 UTC m=+147.747457401" watchObservedRunningTime="2025-11-26 05:28:09.568452545 +0000 UTC m=+147.751504131"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.596292 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:09 crc kubenswrapper[4871]: E1126 05:28:09.596663 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.096646461 +0000 UTC m=+148.279698047 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.668150 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-rcft8" podStartSLOduration=8.668135877 podStartE2EDuration="8.668135877s" podCreationTimestamp="2025-11-26 05:28:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.667336567 +0000 UTC m=+147.850388153" watchObservedRunningTime="2025-11-26 05:28:09.668135877 +0000 UTC m=+147.851187463"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.668425 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-wg5vb" podStartSLOduration=126.668421324 podStartE2EDuration="2m6.668421324s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.638871654 +0000 UTC m=+147.821923240" watchObservedRunningTime="2025-11-26 05:28:09.668421324 +0000 UTC m=+147.851472910"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.694343 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-sdxzx" podStartSLOduration=127.694324133 podStartE2EDuration="2m7.694324133s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.693019401 +0000 UTC m=+147.876070987" watchObservedRunningTime="2025-11-26 05:28:09.694324133 +0000 UTC m=+147.877375729"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.697707 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:09 crc kubenswrapper[4871]: E1126 05:28:09.697992 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.197974614 +0000 UTC m=+148.381026200 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.722594 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8k6z" podStartSLOduration=126.722576411 podStartE2EDuration="2m6.722576411s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.721110225 +0000 UTC m=+147.904161811" watchObservedRunningTime="2025-11-26 05:28:09.722576411 +0000 UTC m=+147.905627997"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.760595 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-t58x9" podStartSLOduration=126.76058001 podStartE2EDuration="2m6.76058001s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:09.74481494 +0000 UTC m=+147.927866526" watchObservedRunningTime="2025-11-26 05:28:09.76058001 +0000 UTC m=+147.943631596"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.761697 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.762215 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.769158 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.768295 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.776970 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.799420 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:09 crc kubenswrapper[4871]: E1126 05:28:09.799978 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.299951872 +0000 UTC m=+148.483003458 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.904776 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.904982 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a07fd4ff-d54d-42ef-8223-039275189667-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a07fd4ff-d54d-42ef-8223-039275189667\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 05:28:09 crc kubenswrapper[4871]: I1126 05:28:09.905054 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a07fd4ff-d54d-42ef-8223-039275189667-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a07fd4ff-d54d-42ef-8223-039275189667\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 05:28:09 crc kubenswrapper[4871]: E1126 05:28:09.905162 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.405147309 +0000 UTC m=+148.588198895 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.007004 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a07fd4ff-d54d-42ef-8223-039275189667-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a07fd4ff-d54d-42ef-8223-039275189667\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.007080 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.007127 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a07fd4ff-d54d-42ef-8223-039275189667-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a07fd4ff-d54d-42ef-8223-039275189667\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.007245 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a07fd4ff-d54d-42ef-8223-039275189667-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a07fd4ff-d54d-42ef-8223-039275189667\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 05:28:10 crc kubenswrapper[4871]: E1126 05:28:10.007753 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.507743793 +0000 UTC m=+148.690795379 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.032446 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a07fd4ff-d54d-42ef-8223-039275189667-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a07fd4ff-d54d-42ef-8223-039275189667\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.079122 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.108600 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:10 crc kubenswrapper[4871]: E1126 05:28:10.108761 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.608732497 +0000 UTC m=+148.791784083 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.108865 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:10 crc kubenswrapper[4871]: E1126 05:28:10.109167 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.609155697 +0000 UTC m=+148.792207273 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.123769 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.150019 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" event={"ID":"704ebe80-008e-4369-8003-6d264aa6f6dc","Type":"ContainerStarted","Data":"1b8b1e6413947c6d69be9642aa39f7cf788033a36df3eb846ca007baa19a8426"}
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.164794 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-6knjf" event={"ID":"680ec585-d304-48b0-9501-7af7e5bc503b","Type":"ContainerStarted","Data":"e2db6219ca493a59d7231d76c2e7b174b2b61b2ecac588160e0870e6af67bc9c"}
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.167084 4871 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdvkb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.167124 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdvkb" podUID="317fac77-edf3-46a5-9635-1dd8bb83fea6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.168784 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-rdvkb"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.169207 4871 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-ff9xx container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused" start-of-body=
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.169269 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" podUID="e028ebf2-4eb3-477a-be5d-ce02dd655d8d" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.28:8080/healthz\": dial tcp 10.217.0.28:8080: connect: connection refused"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.213146 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:10 crc kubenswrapper[4871]: E1126 05:28:10.213799 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.713783291 +0000 UTC m=+148.896834877 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.217991 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-k95qw"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.245416 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" podStartSLOduration=127.245400192 podStartE2EDuration="2m7.245400192s" podCreationTimestamp="2025-11-26 05:26:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:10.182050177 +0000 UTC m=+148.365101763" watchObservedRunningTime="2025-11-26 05:28:10.245400192 +0000 UTC m=+148.428451778"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.247705 4871 patch_prober.go:28] interesting pod/router-default-5444994796-lb2sb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 26 05:28:10 crc kubenswrapper[4871]: [-]has-synced failed: reason withheld
Nov 26 05:28:10 crc kubenswrapper[4871]: [+]process-running ok
Nov 26 05:28:10 crc kubenswrapper[4871]: healthz check failed
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.247755 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lb2sb" podUID="a54f432f-761c-419d-9c57-654e4f81a28f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.316099 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.316143 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.316347 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:28:10 crc kubenswrapper[4871]: E1126 05:28:10.323661 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.823644454 +0000 UTC m=+149.006696040 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.329440 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.340317 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.356774 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z482k"]
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.357682 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z482k"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.362366 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.370297 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z482k"]
Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.420038 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 26 05:28:10 crc kubenswrapper[4871]: E1126 05:28:10.420330 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:10.920314781 +0000 UTC m=+149.103366367 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.521283 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-catalog-content\") pod \"certified-operators-z482k\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.521390 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgr7l\" (UniqueName: \"kubernetes.io/projected/b042c85e-dbcc-488a-afca-602eadb2a09a-kube-api-access-jgr7l\") pod \"certified-operators-z482k\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.521439 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.521461 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.521511 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-utilities\") pod \"certified-operators-z482k\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.521557 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:10 crc kubenswrapper[4871]: E1126 05:28:10.521857 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:11.021830378 +0000 UTC m=+149.204881964 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.525625 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.525784 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.541590 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.541658 4871 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.545574 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vx568"] Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.545649 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.546490 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vx568" Nov 26 05:28:10 crc kubenswrapper[4871]: W1126 05:28:10.549259 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poda07fd4ff_d54d_42ef_8223_039275189667.slice/crio-9d47dd624066c37b8b83213aec4c3bd3dc7fecd2bd23b7a22dcb8b0b0c93b4fb WatchSource:0}: Error finding container 9d47dd624066c37b8b83213aec4c3bd3dc7fecd2bd23b7a22dcb8b0b0c93b4fb: Status 404 returned error can't find the container with id 9d47dd624066c37b8b83213aec4c3bd3dc7fecd2bd23b7a22dcb8b0b0c93b4fb Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.549462 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.554866 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.567435 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.567473 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vx568"] Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.624100 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.624515 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-utilities\") pod \"community-operators-vx568\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " pod="openshift-marketplace/community-operators-vx568" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.624605 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgr7l\" (UniqueName: \"kubernetes.io/projected/b042c85e-dbcc-488a-afca-602eadb2a09a-kube-api-access-jgr7l\") pod \"certified-operators-z482k\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.624633 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-catalog-content\") pod \"community-operators-vx568\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " pod="openshift-marketplace/community-operators-vx568" Nov 26 05:28:10 crc kubenswrapper[4871]: E1126 05:28:10.624707 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:11.124679388 +0000 UTC m=+149.307730974 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.624972 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-utilities\") pod \"certified-operators-z482k\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.625011 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.625034 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-catalog-content\") pod \"certified-operators-z482k\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.625056 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lhxd\" (UniqueName: \"kubernetes.io/projected/20dfe3fa-af2d-4906-bc8c-21d863489308-kube-api-access-7lhxd\") pod \"community-operators-vx568\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " pod="openshift-marketplace/community-operators-vx568" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.625429 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-utilities\") pod \"certified-operators-z482k\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:10 crc kubenswrapper[4871]: E1126 05:28:10.625678 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 05:28:11.125668082 +0000 UTC m=+149.308719668 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-tcqk7" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.626239 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-catalog-content\") pod \"certified-operators-z482k\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.654751 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgr7l\" (UniqueName: \"kubernetes.io/projected/b042c85e-dbcc-488a-afca-602eadb2a09a-kube-api-access-jgr7l\") pod \"certified-operators-z482k\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.687287 4871 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-26T05:28:10.541673578Z","Handler":null,"Name":""} Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.718098 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.726406 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.726659 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-catalog-content\") pod \"community-operators-vx568\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " pod="openshift-marketplace/community-operators-vx568" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.726715 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lhxd\" (UniqueName: \"kubernetes.io/projected/20dfe3fa-af2d-4906-bc8c-21d863489308-kube-api-access-7lhxd\") pod \"community-operators-vx568\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " pod="openshift-marketplace/community-operators-vx568" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.726767 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-utilities\") pod \"community-operators-vx568\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " pod="openshift-marketplace/community-operators-vx568" Nov 26 05:28:10 crc kubenswrapper[4871]: E1126 05:28:10.727211 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 05:28:11.227193258 +0000 UTC m=+149.410244834 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.727500 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-utilities\") pod \"community-operators-vx568\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " pod="openshift-marketplace/community-operators-vx568" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.728878 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-catalog-content\") pod \"community-operators-vx568\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " pod="openshift-marketplace/community-operators-vx568" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.729662 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6c56g"] Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.730736 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.740413 4871 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.740453 4871 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.756155 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6c56g"] Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.758746 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lhxd\" (UniqueName: \"kubernetes.io/projected/20dfe3fa-af2d-4906-bc8c-21d863489308-kube-api-access-7lhxd\") pod \"community-operators-vx568\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " pod="openshift-marketplace/community-operators-vx568" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.828187 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.828444 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-utilities\") pod 
\"certified-operators-6c56g\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.828499 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-catalog-content\") pod \"certified-operators-6c56g\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.828550 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjhkp\" (UniqueName: \"kubernetes.io/projected/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-kube-api-access-zjhkp\") pod \"certified-operators-6c56g\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.842855 4871 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.842886 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.904470 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-tcqk7\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") " pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.918760 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vx568" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.928746 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hlfdn"] Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.929875 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.941618 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.942214 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-utilities\") pod \"certified-operators-6c56g\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.942277 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-catalog-content\") pod \"certified-operators-6c56g\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.942309 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjhkp\" (UniqueName: \"kubernetes.io/projected/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-kube-api-access-zjhkp\") pod \"certified-operators-6c56g\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.943299 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-utilities\") pod \"certified-operators-6c56g\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.943592 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-catalog-content\") pod \"certified-operators-6c56g\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.946978 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hlfdn"] Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.956291 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 05:28:10 crc kubenswrapper[4871]: I1126 05:28:10.983375 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjhkp\" (UniqueName: \"kubernetes.io/projected/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-kube-api-access-zjhkp\") pod \"certified-operators-6c56g\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.018985 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.048390 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8bx4\" (UniqueName: \"kubernetes.io/projected/4776b484-dfbf-4547-9409-e2317c476932-kube-api-access-q8bx4\") pod \"community-operators-hlfdn\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.048477 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-utilities\") pod \"community-operators-hlfdn\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.048507 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-catalog-content\") pod \"community-operators-hlfdn\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.115816 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.150139 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8bx4\" (UniqueName: \"kubernetes.io/projected/4776b484-dfbf-4547-9409-e2317c476932-kube-api-access-q8bx4\") pod \"community-operators-hlfdn\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.150210 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-utilities\") pod \"community-operators-hlfdn\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.150243 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-catalog-content\") pod \"community-operators-hlfdn\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.151775 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-catalog-content\") pod \"community-operators-hlfdn\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.152251 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-utilities\") pod \"community-operators-hlfdn\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.180750 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8bx4\" (UniqueName: \"kubernetes.io/projected/4776b484-dfbf-4547-9409-e2317c476932-kube-api-access-q8bx4\") pod \"community-operators-hlfdn\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.197691 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"442e721572c2ccc610ff43ed9d38ebf752d8eabd679530c3ab502beb17acd973"} Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.204639 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a07fd4ff-d54d-42ef-8223-039275189667","Type":"ContainerStarted","Data":"e08e3f03a93f20a5805de9ae363180ef1ea7ce61e57d5b04ad317c5c983f7d97"} Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.204674 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a07fd4ff-d54d-42ef-8223-039275189667","Type":"ContainerStarted","Data":"9d47dd624066c37b8b83213aec4c3bd3dc7fecd2bd23b7a22dcb8b0b0c93b4fb"} Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.212737 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="hostpath-provisioner/csi-hostpathplugin-6knjf" event={"ID":"680ec585-d304-48b0-9501-7af7e5bc503b","Type":"ContainerStarted","Data":"3b5eafd47574feb696a34a98f0a0df6525230884383068c1d3d6e0f15d1f7a9f"} Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.213007 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-6knjf" event={"ID":"680ec585-d304-48b0-9501-7af7e5bc503b","Type":"ContainerStarted","Data":"49dc327904b1979c19b9cc2f24f78e1b050b87ed0c721a53272305393a7a4d39"} Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.223167 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.223147905 podStartE2EDuration="2.223147905s" podCreationTimestamp="2025-11-26 05:28:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:11.215797384 +0000 UTC m=+149.398848970" watchObservedRunningTime="2025-11-26 05:28:11.223147905 +0000 UTC m=+149.406199491" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.233081 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"370cf74ac560635e7b48210a719b34984414d16bc6ca352ec32c595a303f528f"} Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.234599 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z482k"] Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.235399 4871 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdvkb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.235470 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdvkb" podUID="317fac77-edf3-46a5-9635-1dd8bb83fea6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.237660 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.240819 4871 patch_prober.go:28] interesting pod/router-default-5444994796-lb2sb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 05:28:11 crc kubenswrapper[4871]: [-]has-synced failed: reason withheld Nov 26 05:28:11 crc kubenswrapper[4871]: [+]process-running ok Nov 26 05:28:11 crc kubenswrapper[4871]: healthz check failed Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.240851 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lb2sb" podUID="a54f432f-761c-419d-9c57-654e4f81a28f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.245259 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-6knjf" 
podStartSLOduration=10.245250161 podStartE2EDuration="10.245250161s" podCreationTimestamp="2025-11-26 05:28:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:11.243710163 +0000 UTC m=+149.426761749" watchObservedRunningTime="2025-11-26 05:28:11.245250161 +0000 UTC m=+149.428301747" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.329441 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.335870 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vx568"] Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.446244 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6c56g"] Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.550501 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tcqk7"] Nov 26 05:28:11 crc kubenswrapper[4871]: I1126 05:28:11.682746 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hlfdn"] Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.232432 4871 patch_prober.go:28] interesting pod/router-default-5444994796-lb2sb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 05:28:12 crc kubenswrapper[4871]: [-]has-synced failed: reason withheld Nov 26 05:28:12 crc kubenswrapper[4871]: [+]process-running ok Nov 26 05:28:12 crc kubenswrapper[4871]: healthz check failed Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.232912 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-lb2sb" podUID="a54f432f-761c-419d-9c57-654e4f81a28f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.240799 4871 generic.go:334] "Generic (PLEG): container finished" podID="4776b484-dfbf-4547-9409-e2317c476932" containerID="419bbfdf8818640e744c2c9d9e3a102b22d51e6af93b2f2a8b8edfa9a51b59af" exitCode=0 Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.241000 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hlfdn" event={"ID":"4776b484-dfbf-4547-9409-e2317c476932","Type":"ContainerDied","Data":"419bbfdf8818640e744c2c9d9e3a102b22d51e6af93b2f2a8b8edfa9a51b59af"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.241128 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hlfdn" event={"ID":"4776b484-dfbf-4547-9409-e2317c476932","Type":"ContainerStarted","Data":"2b92944619582b9ee23f16ffd0d76613470594428bcc3267fdf4e539699017bc"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.247312 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.256314 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"253844bb9a182174f9ef16fd2f5e8373cea7fdfdb1306876ae3153ea25efa410"} Nov 26 05:28:12 crc kubenswrapper[4871]: 
I1126 05:28:12.258692 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.258809 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vx568" event={"ID":"20dfe3fa-af2d-4906-bc8c-21d863489308","Type":"ContainerDied","Data":"19920aefe3e724ed0095c17809e56a69f82e308ecc2def3813d6cb2acf3c2033"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.258477 4871 generic.go:334] "Generic (PLEG): container finished" podID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerID="19920aefe3e724ed0095c17809e56a69f82e308ecc2def3813d6cb2acf3c2033" exitCode=0 Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.259026 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vx568" event={"ID":"20dfe3fa-af2d-4906-bc8c-21d863489308","Type":"ContainerStarted","Data":"1cc594b1b535c24a4607fd9607547d02189c243aa739d09455c365925fbc5862"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.262614 4871 generic.go:334] "Generic (PLEG): container finished" podID="b042c85e-dbcc-488a-afca-602eadb2a09a" containerID="a36d4fcc698e9c7b2654edaffe605b5b47cd96fc18a45d0907f633a6cda30879" exitCode=0 Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.262706 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z482k" event={"ID":"b042c85e-dbcc-488a-afca-602eadb2a09a","Type":"ContainerDied","Data":"a36d4fcc698e9c7b2654edaffe605b5b47cd96fc18a45d0907f633a6cda30879"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.262733 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z482k" event={"ID":"b042c85e-dbcc-488a-afca-602eadb2a09a","Type":"ContainerStarted","Data":"b8ec6da5d418c74c5c2880b466e5f0d0619840a8bf9d289432576b5c4ba7f62a"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.273456 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" event={"ID":"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03","Type":"ContainerStarted","Data":"f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.273499 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" event={"ID":"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03","Type":"ContainerStarted","Data":"fff78e86fe23eeab4c60eec2e48fc6dffb8c6ee990a4bf26d08094451c5aaa9f"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.273798 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.277391 4871 generic.go:334] "Generic (PLEG): container finished" podID="cb3f5110-df96-4946-b0a5-3439ab4e1724" containerID="2b65b3ae87375dbf86af9555f34774ddda3de3391ea7d761ffeecaebf56ed651" exitCode=0 Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.277497 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" event={"ID":"cb3f5110-df96-4946-b0a5-3439ab4e1724","Type":"ContainerDied","Data":"2b65b3ae87375dbf86af9555f34774ddda3de3391ea7d761ffeecaebf56ed651"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.281887 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"e1c0884a2deb038bbb87f61e7ec7318e0b753706104073f9d23e0915be2542ed"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.284276 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"d5d8621e1b1cabdd25ec41bb659e2923696d6b8b9a1f424e6c35349140829c76"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.284400 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"e15e43451f00505d2ae1966e923dc2a0a43afcba6fdbd4cf95574dd396049d63"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.290150 4871 generic.go:334] "Generic (PLEG): container finished" podID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" containerID="db3971f53a06f1e57913083895a11b4cd09b7cdc0bbbad78ae3e62f9b561c173" exitCode=0 Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.290218 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6c56g" event={"ID":"f7d5d2e5-f16d-4653-b6d4-4c5c75501655","Type":"ContainerDied","Data":"db3971f53a06f1e57913083895a11b4cd09b7cdc0bbbad78ae3e62f9b561c173"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.290541 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6c56g" event={"ID":"f7d5d2e5-f16d-4653-b6d4-4c5c75501655","Type":"ContainerStarted","Data":"629b45c7b3f7e8fb01db38e58707a1404f26ae53bfded4457b1e833162d4847e"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.295872 4871 generic.go:334] "Generic (PLEG): container finished" podID="a07fd4ff-d54d-42ef-8223-039275189667" containerID="e08e3f03a93f20a5805de9ae363180ef1ea7ce61e57d5b04ad317c5c983f7d97" exitCode=0 Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.296135 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a07fd4ff-d54d-42ef-8223-039275189667","Type":"ContainerDied","Data":"e08e3f03a93f20a5805de9ae363180ef1ea7ce61e57d5b04ad317c5c983f7d97"} Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.395997 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" podStartSLOduration=130.395984177 podStartE2EDuration="2m10.395984177s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:12.39408987 +0000 UTC m=+150.577141476" watchObservedRunningTime="2025-11-26 05:28:12.395984177 +0000 UTC m=+150.579035763" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.514423 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.518582 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7n98c"] Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.524421 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.528797 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.538771 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7n98c"] Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.676188 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85wmz\" (UniqueName: \"kubernetes.io/projected/13a2fbc4-3140-412d-b990-9398453dc21c-kube-api-access-85wmz\") pod \"redhat-marketplace-7n98c\" (UID: \"13a2fbc4-3140-412d-b990-9398453dc21c\") " pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.676267 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-catalog-content\") pod \"redhat-marketplace-7n98c\" (UID: \"13a2fbc4-3140-412d-b990-9398453dc21c\") " pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.676515 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-utilities\") pod \"redhat-marketplace-7n98c\" (UID: \"13a2fbc4-3140-412d-b990-9398453dc21c\") " pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.712552 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.719816 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-ql4w4" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.778426 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-catalog-content\") pod \"redhat-marketplace-7n98c\" (UID: \"13a2fbc4-3140-412d-b990-9398453dc21c\") " pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.778473 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85wmz\" (UniqueName: \"kubernetes.io/projected/13a2fbc4-3140-412d-b990-9398453dc21c-kube-api-access-85wmz\") pod \"redhat-marketplace-7n98c\" (UID: \"13a2fbc4-3140-412d-b990-9398453dc21c\") " pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.778544 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-utilities\") pod \"redhat-marketplace-7n98c\" (UID: \"13a2fbc4-3140-412d-b990-9398453dc21c\") " pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.779036 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-utilities\") pod \"redhat-marketplace-7n98c\" (UID: 
\"13a2fbc4-3140-412d-b990-9398453dc21c\") " pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.779143 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-catalog-content\") pod \"redhat-marketplace-7n98c\" (UID: \"13a2fbc4-3140-412d-b990-9398453dc21c\") " pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.835549 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85wmz\" (UniqueName: \"kubernetes.io/projected/13a2fbc4-3140-412d-b990-9398453dc21c-kube-api-access-85wmz\") pod \"redhat-marketplace-7n98c\" (UID: \"13a2fbc4-3140-412d-b990-9398453dc21c\") " pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.848410 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.921694 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bnc7b"] Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.922689 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:28:12 crc kubenswrapper[4871]: I1126 05:28:12.936612 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnc7b"] Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.055395 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7n98c"] Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.056796 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.056872 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.058153 4871 patch_prober.go:28] interesting pod/console-f9d7485db-h5qx5 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.058213 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-h5qx5" podUID="65ed678d-1457-46e2-a59d-1b05e7bbee8c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.085992 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2ghg\" (UniqueName: \"kubernetes.io/projected/28cb4585-570c-471f-81b7-df6b52ccda23-kube-api-access-f2ghg\") pod \"redhat-marketplace-bnc7b\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.086067 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-utilities\") pod 
\"redhat-marketplace-bnc7b\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.086091 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-catalog-content\") pod \"redhat-marketplace-bnc7b\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.188070 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2ghg\" (UniqueName: \"kubernetes.io/projected/28cb4585-570c-471f-81b7-df6b52ccda23-kube-api-access-f2ghg\") pod \"redhat-marketplace-bnc7b\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.188155 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-utilities\") pod \"redhat-marketplace-bnc7b\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.188196 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-catalog-content\") pod \"redhat-marketplace-bnc7b\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.188711 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-catalog-content\") pod \"redhat-marketplace-bnc7b\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.189373 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-utilities\") pod \"redhat-marketplace-bnc7b\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.207467 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2ghg\" (UniqueName: \"kubernetes.io/projected/28cb4585-570c-471f-81b7-df6b52ccda23-kube-api-access-f2ghg\") pod \"redhat-marketplace-bnc7b\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.231275 4871 patch_prober.go:28] interesting pod/router-default-5444994796-lb2sb container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 05:28:13 crc kubenswrapper[4871]: [-]has-synced failed: reason withheld Nov 26 05:28:13 crc kubenswrapper[4871]: [+]process-running ok Nov 26 05:28:13 crc kubenswrapper[4871]: healthz check failed Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.231355 4871 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-ingress/router-default-5444994796-lb2sb" podUID="a54f432f-761c-419d-9c57-654e4f81a28f" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.240118 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.323631 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7n98c" event={"ID":"13a2fbc4-3140-412d-b990-9398453dc21c","Type":"ContainerStarted","Data":"1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1"} Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.324000 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7n98c" event={"ID":"13a2fbc4-3140-412d-b990-9398453dc21c","Type":"ContainerStarted","Data":"c3d799ffa824bf56c0e63ad5ccbd25f54015fa42553ef9cfde607580b6d61605"} Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.522113 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xpxnm"] Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.523439 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.529576 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.531656 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xpxnm"] Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.576437 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.607578 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.694788 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb3f5110-df96-4946-b0a5-3439ab4e1724-secret-volume\") pod \"cb3f5110-df96-4946-b0a5-3439ab4e1724\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.695224 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb3f5110-df96-4946-b0a5-3439ab4e1724-config-volume\") pod \"cb3f5110-df96-4946-b0a5-3439ab4e1724\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.695321 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a07fd4ff-d54d-42ef-8223-039275189667-kube-api-access\") pod \"a07fd4ff-d54d-42ef-8223-039275189667\" (UID: \"a07fd4ff-d54d-42ef-8223-039275189667\") " Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.695382 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a07fd4ff-d54d-42ef-8223-039275189667-kubelet-dir\") pod \"a07fd4ff-d54d-42ef-8223-039275189667\" (UID: \"a07fd4ff-d54d-42ef-8223-039275189667\") " Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.695460 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwn4k\" (UniqueName: \"kubernetes.io/projected/cb3f5110-df96-4946-b0a5-3439ab4e1724-kube-api-access-kwn4k\") pod \"cb3f5110-df96-4946-b0a5-3439ab4e1724\" (UID: \"cb3f5110-df96-4946-b0a5-3439ab4e1724\") " Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.695574 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a07fd4ff-d54d-42ef-8223-039275189667-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a07fd4ff-d54d-42ef-8223-039275189667" (UID: "a07fd4ff-d54d-42ef-8223-039275189667"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.696551 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb3f5110-df96-4946-b0a5-3439ab4e1724-config-volume" (OuterVolumeSpecName: "config-volume") pod "cb3f5110-df96-4946-b0a5-3439ab4e1724" (UID: "cb3f5110-df96-4946-b0a5-3439ab4e1724"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.697435 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-catalog-content\") pod \"redhat-operators-xpxnm\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.697600 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6skp\" (UniqueName: \"kubernetes.io/projected/40308fed-3d95-4693-9efd-e44e891eb454-kube-api-access-c6skp\") pod \"redhat-operators-xpxnm\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.697675 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-utilities\") pod \"redhat-operators-xpxnm\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.697953 4871 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb3f5110-df96-4946-b0a5-3439ab4e1724-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.697970 4871 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a07fd4ff-d54d-42ef-8223-039275189667-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.704091 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb3f5110-df96-4946-b0a5-3439ab4e1724-kube-api-access-kwn4k" (OuterVolumeSpecName: "kube-api-access-kwn4k") pod "cb3f5110-df96-4946-b0a5-3439ab4e1724" (UID: "cb3f5110-df96-4946-b0a5-3439ab4e1724"). InnerVolumeSpecName "kube-api-access-kwn4k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.705206 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a07fd4ff-d54d-42ef-8223-039275189667-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a07fd4ff-d54d-42ef-8223-039275189667" (UID: "a07fd4ff-d54d-42ef-8223-039275189667"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.708457 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb3f5110-df96-4946-b0a5-3439ab4e1724-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cb3f5110-df96-4946-b0a5-3439ab4e1724" (UID: "cb3f5110-df96-4946-b0a5-3439ab4e1724"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.716200 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnc7b"] Nov 26 05:28:13 crc kubenswrapper[4871]: W1126 05:28:13.724397 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28cb4585_570c_471f_81b7_df6b52ccda23.slice/crio-404a68db30f5ab86be4b54c0e274e8866c432b861dee49198911a3f68a59fc76 WatchSource:0}: Error finding container 404a68db30f5ab86be4b54c0e274e8866c432b861dee49198911a3f68a59fc76: Status 404 returned error can't find the container with id 404a68db30f5ab86be4b54c0e274e8866c432b861dee49198911a3f68a59fc76 Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.798700 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-catalog-content\") pod \"redhat-operators-xpxnm\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.802355 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6skp\" (UniqueName: \"kubernetes.io/projected/40308fed-3d95-4693-9efd-e44e891eb454-kube-api-access-c6skp\") pod \"redhat-operators-xpxnm\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.802467 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-utilities\") pod \"redhat-operators-xpxnm\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.802924 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-utilities\") pod \"redhat-operators-xpxnm\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.803101 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a07fd4ff-d54d-42ef-8223-039275189667-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.803120 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwn4k\" (UniqueName: \"kubernetes.io/projected/cb3f5110-df96-4946-b0a5-3439ab4e1724-kube-api-access-kwn4k\") on node \"crc\" DevicePath \"\"" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.803134 4871 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb3f5110-df96-4946-b0a5-3439ab4e1724-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.805101 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-catalog-content\") pod \"redhat-operators-xpxnm\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 
05:28:13.837678 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6skp\" (UniqueName: \"kubernetes.io/projected/40308fed-3d95-4693-9efd-e44e891eb454-kube-api-access-c6skp\") pod \"redhat-operators-xpxnm\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.841706 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.872099 4871 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdvkb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.872154 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdvkb" podUID="317fac77-edf3-46a5-9635-1dd8bb83fea6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.872100 4871 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdvkb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.872407 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-rdvkb" podUID="317fac77-edf3-46a5-9635-1dd8bb83fea6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.919708 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-znrdp"] Nov 26 05:28:13 crc kubenswrapper[4871]: E1126 05:28:13.919965 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a07fd4ff-d54d-42ef-8223-039275189667" containerName="pruner" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.919980 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a07fd4ff-d54d-42ef-8223-039275189667" containerName="pruner" Nov 26 05:28:13 crc kubenswrapper[4871]: E1126 05:28:13.919990 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb3f5110-df96-4946-b0a5-3439ab4e1724" containerName="collect-profiles" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.919997 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb3f5110-df96-4946-b0a5-3439ab4e1724" containerName="collect-profiles" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.920119 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a07fd4ff-d54d-42ef-8223-039275189667" containerName="pruner" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.920134 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb3f5110-df96-4946-b0a5-3439ab4e1724" containerName="collect-profiles" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.922345 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:28:13 crc kubenswrapper[4871]: I1126 05:28:13.935804 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-znrdp"] Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.005110 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nxx6\" (UniqueName: \"kubernetes.io/projected/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-kube-api-access-7nxx6\") pod \"redhat-operators-znrdp\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.005604 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-utilities\") pod \"redhat-operators-znrdp\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.005641 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-catalog-content\") pod \"redhat-operators-znrdp\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.106687 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-utilities\") pod \"redhat-operators-znrdp\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.106735 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-catalog-content\") pod \"redhat-operators-znrdp\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.106785 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nxx6\" (UniqueName: \"kubernetes.io/projected/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-kube-api-access-7nxx6\") pod \"redhat-operators-znrdp\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.107419 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-catalog-content\") pod \"redhat-operators-znrdp\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.108010 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-utilities\") pod \"redhat-operators-znrdp\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.124610 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-7nxx6\" (UniqueName: \"kubernetes.io/projected/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-kube-api-access-7nxx6\") pod \"redhat-operators-znrdp\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.229226 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.232426 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.243933 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.244021 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.244605 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.253206 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.333167 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xpxnm"] Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.359162 4871 generic.go:334] "Generic (PLEG): container finished" podID="28cb4585-570c-471f-81b7-df6b52ccda23" containerID="9d235161770cbfa40eea52471de4cc6d178b7991da072b81699daf473a3b9fd5" exitCode=0 Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.359238 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnc7b" event={"ID":"28cb4585-570c-471f-81b7-df6b52ccda23","Type":"ContainerDied","Data":"9d235161770cbfa40eea52471de4cc6d178b7991da072b81699daf473a3b9fd5"} Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.359269 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnc7b" event={"ID":"28cb4585-570c-471f-81b7-df6b52ccda23","Type":"ContainerStarted","Data":"404a68db30f5ab86be4b54c0e274e8866c432b861dee49198911a3f68a59fc76"} Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.363793 4871 generic.go:334] "Generic (PLEG): container finished" podID="13a2fbc4-3140-412d-b990-9398453dc21c" containerID="1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1" exitCode=0 Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.363913 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7n98c" event={"ID":"13a2fbc4-3140-412d-b990-9398453dc21c","Type":"ContainerDied","Data":"1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1"} Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.369195 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a07fd4ff-d54d-42ef-8223-039275189667","Type":"ContainerDied","Data":"9d47dd624066c37b8b83213aec4c3bd3dc7fecd2bd23b7a22dcb8b0b0c93b4fb"} Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.369226 4871 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="9d47dd624066c37b8b83213aec4c3bd3dc7fecd2bd23b7a22dcb8b0b0c93b4fb" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.369300 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.373208 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" event={"ID":"cb3f5110-df96-4946-b0a5-3439ab4e1724","Type":"ContainerDied","Data":"6a185f561958ca7130e1e1cd5db504fcff6a6d19b609555caa7f6aaca8819a21"} Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.373267 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a185f561958ca7130e1e1cd5db504fcff6a6d19b609555caa7f6aaca8819a21" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.373373 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.383725 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-lb2sb" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.384367 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2bzqb" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.771793 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-znrdp"] Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.873573 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.874844 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.877686 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.878614 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.888382 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.923080 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2ced5062-5584-44c8-96aa-b98c22d90ef6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"2ced5062-5584-44c8-96aa-b98c22d90ef6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 05:28:14 crc kubenswrapper[4871]: I1126 05:28:14.923132 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ced5062-5584-44c8-96aa-b98c22d90ef6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"2ced5062-5584-44c8-96aa-b98c22d90ef6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.024169 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2ced5062-5584-44c8-96aa-b98c22d90ef6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"2ced5062-5584-44c8-96aa-b98c22d90ef6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.024224 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ced5062-5584-44c8-96aa-b98c22d90ef6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"2ced5062-5584-44c8-96aa-b98c22d90ef6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.024362 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2ced5062-5584-44c8-96aa-b98c22d90ef6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"2ced5062-5584-44c8-96aa-b98c22d90ef6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.041736 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ced5062-5584-44c8-96aa-b98c22d90ef6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"2ced5062-5584-44c8-96aa-b98c22d90ef6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.221994 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.384781 4871 generic.go:334] "Generic (PLEG): container finished" podID="40308fed-3d95-4693-9efd-e44e891eb454" containerID="b1a39ceaad8b9acaa64c3fd3394a1dba86959248b9d698be18b95f3f320f6a18" exitCode=0 Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.384853 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpxnm" event={"ID":"40308fed-3d95-4693-9efd-e44e891eb454","Type":"ContainerDied","Data":"b1a39ceaad8b9acaa64c3fd3394a1dba86959248b9d698be18b95f3f320f6a18"} Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.384879 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpxnm" event={"ID":"40308fed-3d95-4693-9efd-e44e891eb454","Type":"ContainerStarted","Data":"0bd81754b16a5ae019424deb7df461f0816de13c9086c55283798278143d6ffe"} Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.390937 4871 generic.go:334] "Generic (PLEG): container finished" podID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerID="4cad2e98ed79003deaa126a4270cdb2c079f14ba18c40de718d12af2d5650e9f" exitCode=0 Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.391195 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znrdp" event={"ID":"dcd2270a-9f70-4882-89f6-b916ef8fcc5d","Type":"ContainerDied","Data":"4cad2e98ed79003deaa126a4270cdb2c079f14ba18c40de718d12af2d5650e9f"} Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.391244 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znrdp" event={"ID":"dcd2270a-9f70-4882-89f6-b916ef8fcc5d","Type":"ContainerStarted","Data":"b056db94caf86d1b4159aae9160fda348949c491f841a7d5679c97402266064d"} Nov 26 05:28:15 crc kubenswrapper[4871]: I1126 05:28:15.639160 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 05:28:16 crc kubenswrapper[4871]: I1126 05:28:16.426021 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2ced5062-5584-44c8-96aa-b98c22d90ef6","Type":"ContainerStarted","Data":"00f6a9398edd61e09d055f969b2183ca4084b013ddf6732d401529096b9183bd"} Nov 26 05:28:16 crc kubenswrapper[4871]: I1126 05:28:16.426317 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2ced5062-5584-44c8-96aa-b98c22d90ef6","Type":"ContainerStarted","Data":"ac59b56834c41b162f1c108d5bf8992c43f822f8992c00f1c0bb03bde5a019a4"} Nov 26 05:28:16 crc kubenswrapper[4871]: I1126 05:28:16.467674 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.4676565520000002 podStartE2EDuration="2.467656552s" podCreationTimestamp="2025-11-26 05:28:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:16.464272439 +0000 UTC m=+154.647324025" watchObservedRunningTime="2025-11-26 05:28:16.467656552 +0000 UTC m=+154.650708138" Nov 26 05:28:17 crc kubenswrapper[4871]: I1126 05:28:17.444356 4871 generic.go:334] "Generic (PLEG): container finished" podID="2ced5062-5584-44c8-96aa-b98c22d90ef6" containerID="00f6a9398edd61e09d055f969b2183ca4084b013ddf6732d401529096b9183bd" exitCode=0 Nov 26 05:28:17 crc 
kubenswrapper[4871]: I1126 05:28:17.444479 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2ced5062-5584-44c8-96aa-b98c22d90ef6","Type":"ContainerDied","Data":"00f6a9398edd61e09d055f969b2183ca4084b013ddf6732d401529096b9183bd"} Nov 26 05:28:19 crc kubenswrapper[4871]: I1126 05:28:19.351925 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-nntmb" Nov 26 05:28:23 crc kubenswrapper[4871]: I1126 05:28:23.093935 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:23 crc kubenswrapper[4871]: I1126 05:28:23.099163 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:28:23 crc kubenswrapper[4871]: I1126 05:28:23.623746 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:28:23 crc kubenswrapper[4871]: I1126 05:28:23.623799 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:28:23 crc kubenswrapper[4871]: I1126 05:28:23.871709 4871 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdvkb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 26 05:28:23 crc kubenswrapper[4871]: I1126 05:28:23.872215 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-rdvkb" podUID="317fac77-edf3-46a5-9635-1dd8bb83fea6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 26 05:28:23 crc kubenswrapper[4871]: I1126 05:28:23.871710 4871 patch_prober.go:28] interesting pod/downloads-7954f5f757-rdvkb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 26 05:28:23 crc kubenswrapper[4871]: I1126 05:28:23.872289 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-rdvkb" podUID="317fac77-edf3-46a5-9635-1dd8bb83fea6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 26 05:28:24 crc kubenswrapper[4871]: I1126 05:28:24.939046 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:28:24 crc kubenswrapper[4871]: I1126 05:28:24.950635 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/30b3c82b-ca2a-4821-86e0-94aa2afce847-metrics-certs\") pod \"network-metrics-daemon-z2d5h\" (UID: \"30b3c82b-ca2a-4821-86e0-94aa2afce847\") " pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:28:25 crc kubenswrapper[4871]: I1126 05:28:25.032123 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-z2d5h" Nov 26 05:28:28 crc kubenswrapper[4871]: I1126 05:28:28.336859 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 05:28:28 crc kubenswrapper[4871]: I1126 05:28:28.490261 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2ced5062-5584-44c8-96aa-b98c22d90ef6-kubelet-dir\") pod \"2ced5062-5584-44c8-96aa-b98c22d90ef6\" (UID: \"2ced5062-5584-44c8-96aa-b98c22d90ef6\") " Nov 26 05:28:28 crc kubenswrapper[4871]: I1126 05:28:28.490691 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ced5062-5584-44c8-96aa-b98c22d90ef6-kube-api-access\") pod \"2ced5062-5584-44c8-96aa-b98c22d90ef6\" (UID: \"2ced5062-5584-44c8-96aa-b98c22d90ef6\") " Nov 26 05:28:28 crc kubenswrapper[4871]: I1126 05:28:28.490386 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ced5062-5584-44c8-96aa-b98c22d90ef6-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2ced5062-5584-44c8-96aa-b98c22d90ef6" (UID: "2ced5062-5584-44c8-96aa-b98c22d90ef6"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:28:28 crc kubenswrapper[4871]: I1126 05:28:28.511244 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ced5062-5584-44c8-96aa-b98c22d90ef6-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2ced5062-5584-44c8-96aa-b98c22d90ef6" (UID: "2ced5062-5584-44c8-96aa-b98c22d90ef6"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:28:28 crc kubenswrapper[4871]: I1126 05:28:28.555206 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2ced5062-5584-44c8-96aa-b98c22d90ef6","Type":"ContainerDied","Data":"ac59b56834c41b162f1c108d5bf8992c43f822f8992c00f1c0bb03bde5a019a4"} Nov 26 05:28:28 crc kubenswrapper[4871]: I1126 05:28:28.555254 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac59b56834c41b162f1c108d5bf8992c43f822f8992c00f1c0bb03bde5a019a4" Nov 26 05:28:28 crc kubenswrapper[4871]: I1126 05:28:28.555315 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 05:28:28 crc kubenswrapper[4871]: I1126 05:28:28.562832 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-z2d5h"] Nov 26 05:28:28 crc kubenswrapper[4871]: I1126 05:28:28.592140 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ced5062-5584-44c8-96aa-b98c22d90ef6-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 05:28:28 crc kubenswrapper[4871]: I1126 05:28:28.592187 4871 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2ced5062-5584-44c8-96aa-b98c22d90ef6-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 05:28:31 crc kubenswrapper[4871]: I1126 05:28:31.026372 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" Nov 26 05:28:33 crc kubenswrapper[4871]: I1126 05:28:33.879138 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-rdvkb" Nov 26 05:28:38 crc kubenswrapper[4871]: I1126 05:28:38.614441 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" event={"ID":"30b3c82b-ca2a-4821-86e0-94aa2afce847","Type":"ContainerStarted","Data":"ce1286011d8e667e7d89c25628051d7defe8aa876fbebcd6607cc70bed0bd6f3"} Nov 26 05:28:41 crc kubenswrapper[4871]: E1126 05:28:41.594883 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 26 05:28:41 crc kubenswrapper[4871]: E1126 05:28:41.595094 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7lhxd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
community-operators-vx568_openshift-marketplace(20dfe3fa-af2d-4906-bc8c-21d863489308): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 05:28:41 crc kubenswrapper[4871]: E1126 05:28:41.596344 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-vx568" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" Nov 26 05:28:43 crc kubenswrapper[4871]: E1126 05:28:43.351450 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-vx568" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" Nov 26 05:28:43 crc kubenswrapper[4871]: E1126 05:28:43.442340 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 26 05:28:43 crc kubenswrapper[4871]: E1126 05:28:43.442811 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f2ghg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-bnc7b_openshift-marketplace(28cb4585-570c-471f-81b7-df6b52ccda23): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 05:28:43 crc kubenswrapper[4871]: E1126 05:28:43.444390 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context 
canceled\"" pod="openshift-marketplace/redhat-marketplace-bnc7b" podUID="28cb4585-570c-471f-81b7-df6b52ccda23" Nov 26 05:28:43 crc kubenswrapper[4871]: E1126 05:28:43.478144 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 26 05:28:43 crc kubenswrapper[4871]: E1126 05:28:43.478357 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q8bx4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-hlfdn_openshift-marketplace(4776b484-dfbf-4547-9409-e2317c476932): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 05:28:43 crc kubenswrapper[4871]: E1126 05:28:43.479567 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-hlfdn" podUID="4776b484-dfbf-4547-9409-e2317c476932" Nov 26 05:28:44 crc kubenswrapper[4871]: I1126 05:28:44.045454 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rwdp7" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.144731 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-bnc7b" podUID="28cb4585-570c-471f-81b7-df6b52ccda23" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.145362 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: 
\"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-hlfdn" podUID="4776b484-dfbf-4547-9409-e2317c476932" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.240429 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.240703 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c6skp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-xpxnm_openshift-marketplace(40308fed-3d95-4693-9efd-e44e891eb454): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.241802 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-xpxnm" podUID="40308fed-3d95-4693-9efd-e44e891eb454" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.241805 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.241885 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-85wmz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-7n98c_openshift-marketplace(13a2fbc4-3140-412d-b990-9398453dc21c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.243011 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-7n98c" podUID="13a2fbc4-3140-412d-b990-9398453dc21c" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.258985 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.259096 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7nxx6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-znrdp_openshift-marketplace(dcd2270a-9f70-4882-89f6-b916ef8fcc5d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.260857 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-znrdp" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" Nov 26 05:28:46 crc kubenswrapper[4871]: I1126 05:28:46.658305 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" event={"ID":"30b3c82b-ca2a-4821-86e0-94aa2afce847","Type":"ContainerStarted","Data":"71bf36069aa2012040630a66e7b60a0bc7dfb8931c9ba0c333db10dac882061a"} Nov 26 05:28:46 crc kubenswrapper[4871]: I1126 05:28:46.658895 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-z2d5h" event={"ID":"30b3c82b-ca2a-4821-86e0-94aa2afce847","Type":"ContainerStarted","Data":"884848ef44b6923c0d74597bf0efd7da6a3e19bca8b73cb5fbb2a60f61c23a95"} Nov 26 05:28:46 crc kubenswrapper[4871]: I1126 05:28:46.660312 4871 generic.go:334] "Generic (PLEG): container finished" podID="b042c85e-dbcc-488a-afca-602eadb2a09a" containerID="ed3921840d56d1925627b97aabc59f2724d886b831ea77c3e319793bd1c6fb8a" exitCode=0 Nov 26 05:28:46 crc kubenswrapper[4871]: I1126 05:28:46.660383 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z482k" event={"ID":"b042c85e-dbcc-488a-afca-602eadb2a09a","Type":"ContainerDied","Data":"ed3921840d56d1925627b97aabc59f2724d886b831ea77c3e319793bd1c6fb8a"} Nov 26 05:28:46 crc kubenswrapper[4871]: I1126 05:28:46.664071 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6c56g" event={"ID":"f7d5d2e5-f16d-4653-b6d4-4c5c75501655","Type":"ContainerStarted","Data":"7c927bd78eddbbca0fa83481dfcc226e88f48873bc33f8295cb3fb2d2ab4f57c"} Nov 26 05:28:46 crc kubenswrapper[4871]: 
E1126 05:28:46.665154 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-7n98c" podUID="13a2fbc4-3140-412d-b990-9398453dc21c" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.666145 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-znrdp" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" Nov 26 05:28:46 crc kubenswrapper[4871]: E1126 05:28:46.671737 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-xpxnm" podUID="40308fed-3d95-4693-9efd-e44e891eb454" Nov 26 05:28:46 crc kubenswrapper[4871]: I1126 05:28:46.692319 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-z2d5h" podStartSLOduration=164.692294614 podStartE2EDuration="2m44.692294614s" podCreationTimestamp="2025-11-26 05:26:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:28:46.67713511 +0000 UTC m=+184.860186716" watchObservedRunningTime="2025-11-26 05:28:46.692294614 +0000 UTC m=+184.875346210" Nov 26 05:28:47 crc kubenswrapper[4871]: I1126 05:28:47.674958 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z482k" event={"ID":"b042c85e-dbcc-488a-afca-602eadb2a09a","Type":"ContainerStarted","Data":"1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730"} Nov 26 05:28:47 crc kubenswrapper[4871]: I1126 05:28:47.678882 4871 generic.go:334] "Generic (PLEG): container finished" podID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" containerID="7c927bd78eddbbca0fa83481dfcc226e88f48873bc33f8295cb3fb2d2ab4f57c" exitCode=0 Nov 26 05:28:47 crc kubenswrapper[4871]: I1126 05:28:47.679067 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6c56g" event={"ID":"f7d5d2e5-f16d-4653-b6d4-4c5c75501655","Type":"ContainerDied","Data":"7c927bd78eddbbca0fa83481dfcc226e88f48873bc33f8295cb3fb2d2ab4f57c"} Nov 26 05:28:47 crc kubenswrapper[4871]: I1126 05:28:47.679126 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6c56g" event={"ID":"f7d5d2e5-f16d-4653-b6d4-4c5c75501655","Type":"ContainerStarted","Data":"933ec83cf79ef43e686c9c96b05d22bc9f852b5199807490c99d1af2f39dd585"} Nov 26 05:28:47 crc kubenswrapper[4871]: I1126 05:28:47.707445 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z482k" podStartSLOduration=2.885442344 podStartE2EDuration="37.707415582s" podCreationTimestamp="2025-11-26 05:28:10 +0000 UTC" firstStartedPulling="2025-11-26 05:28:12.273331818 +0000 UTC m=+150.456383394" lastFinishedPulling="2025-11-26 05:28:47.095305006 +0000 UTC m=+185.278356632" observedRunningTime="2025-11-26 05:28:47.701600638 +0000 UTC m=+185.884652264" watchObservedRunningTime="2025-11-26 05:28:47.707415582 +0000 UTC 
m=+185.890467208" Nov 26 05:28:47 crc kubenswrapper[4871]: I1126 05:28:47.732571 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6c56g" podStartSLOduration=2.916605863 podStartE2EDuration="37.732549383s" podCreationTimestamp="2025-11-26 05:28:10 +0000 UTC" firstStartedPulling="2025-11-26 05:28:12.291520937 +0000 UTC m=+150.474572523" lastFinishedPulling="2025-11-26 05:28:47.107464417 +0000 UTC m=+185.290516043" observedRunningTime="2025-11-26 05:28:47.730665566 +0000 UTC m=+185.913717192" watchObservedRunningTime="2025-11-26 05:28:47.732549383 +0000 UTC m=+185.915600979" Nov 26 05:28:50 crc kubenswrapper[4871]: I1126 05:28:50.552650 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 05:28:50 crc kubenswrapper[4871]: I1126 05:28:50.719113 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:50 crc kubenswrapper[4871]: I1126 05:28:50.719175 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:50 crc kubenswrapper[4871]: I1126 05:28:50.858765 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:28:51 crc kubenswrapper[4871]: I1126 05:28:51.117559 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:51 crc kubenswrapper[4871]: I1126 05:28:51.117648 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:51 crc kubenswrapper[4871]: I1126 05:28:51.174365 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:28:53 crc kubenswrapper[4871]: I1126 05:28:53.614598 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:28:53 crc kubenswrapper[4871]: I1126 05:28:53.614672 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:28:57 crc kubenswrapper[4871]: I1126 05:28:57.739860 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vx568" event={"ID":"20dfe3fa-af2d-4906-bc8c-21d863489308","Type":"ContainerStarted","Data":"c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8"} Nov 26 05:28:58 crc kubenswrapper[4871]: I1126 05:28:58.747843 4871 generic.go:334] "Generic (PLEG): container finished" podID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerID="c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8" exitCode=0 Nov 26 05:28:58 crc kubenswrapper[4871]: I1126 05:28:58.747931 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vx568" 
event={"ID":"20dfe3fa-af2d-4906-bc8c-21d863489308","Type":"ContainerDied","Data":"c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8"} Nov 26 05:28:58 crc kubenswrapper[4871]: I1126 05:28:58.750550 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znrdp" event={"ID":"dcd2270a-9f70-4882-89f6-b916ef8fcc5d","Type":"ContainerStarted","Data":"b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69"} Nov 26 05:28:59 crc kubenswrapper[4871]: I1126 05:28:59.757343 4871 generic.go:334] "Generic (PLEG): container finished" podID="4776b484-dfbf-4547-9409-e2317c476932" containerID="07f4eaca5aa84c51c7d255d93cc663cb5bf3712d5a814fd8bc4b967c888f6ea9" exitCode=0 Nov 26 05:28:59 crc kubenswrapper[4871]: I1126 05:28:59.757432 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hlfdn" event={"ID":"4776b484-dfbf-4547-9409-e2317c476932","Type":"ContainerDied","Data":"07f4eaca5aa84c51c7d255d93cc663cb5bf3712d5a814fd8bc4b967c888f6ea9"} Nov 26 05:28:59 crc kubenswrapper[4871]: I1126 05:28:59.759111 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vx568" event={"ID":"20dfe3fa-af2d-4906-bc8c-21d863489308","Type":"ContainerStarted","Data":"f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5"} Nov 26 05:28:59 crc kubenswrapper[4871]: I1126 05:28:59.763327 4871 generic.go:334] "Generic (PLEG): container finished" podID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerID="b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69" exitCode=0 Nov 26 05:28:59 crc kubenswrapper[4871]: I1126 05:28:59.763366 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znrdp" event={"ID":"dcd2270a-9f70-4882-89f6-b916ef8fcc5d","Type":"ContainerDied","Data":"b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69"} Nov 26 05:28:59 crc kubenswrapper[4871]: I1126 05:28:59.810536 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vx568" podStartSLOduration=2.729732281 podStartE2EDuration="49.810505244s" podCreationTimestamp="2025-11-26 05:28:10 +0000 UTC" firstStartedPulling="2025-11-26 05:28:12.26166825 +0000 UTC m=+150.444719836" lastFinishedPulling="2025-11-26 05:28:59.342441213 +0000 UTC m=+197.525492799" observedRunningTime="2025-11-26 05:28:59.810410822 +0000 UTC m=+197.993462418" watchObservedRunningTime="2025-11-26 05:28:59.810505244 +0000 UTC m=+197.993556840" Nov 26 05:29:00 crc kubenswrapper[4871]: I1126 05:29:00.772061 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znrdp" event={"ID":"dcd2270a-9f70-4882-89f6-b916ef8fcc5d","Type":"ContainerStarted","Data":"cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b"} Nov 26 05:29:00 crc kubenswrapper[4871]: I1126 05:29:00.773900 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hlfdn" event={"ID":"4776b484-dfbf-4547-9409-e2317c476932","Type":"ContainerStarted","Data":"1e4198ab9938de19748c2cace86ef43087618a568d2ef1e3290c3c62df7787ad"} Nov 26 05:29:00 crc kubenswrapper[4871]: I1126 05:29:00.778849 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:29:00 crc kubenswrapper[4871]: I1126 05:29:00.788316 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-znrdp" podStartSLOduration=3.00041959 podStartE2EDuration="47.788299356s" podCreationTimestamp="2025-11-26 05:28:13 +0000 UTC" firstStartedPulling="2025-11-26 05:28:15.397569677 +0000 UTC m=+153.580621263" lastFinishedPulling="2025-11-26 05:29:00.185449443 +0000 UTC m=+198.368501029" observedRunningTime="2025-11-26 05:29:00.786290352 +0000 UTC m=+198.969341938" watchObservedRunningTime="2025-11-26 05:29:00.788299356 +0000 UTC m=+198.971350942" Nov 26 05:29:00 crc kubenswrapper[4871]: I1126 05:29:00.807756 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hlfdn" podStartSLOduration=2.828906732 podStartE2EDuration="50.807735892s" podCreationTimestamp="2025-11-26 05:28:10 +0000 UTC" firstStartedPulling="2025-11-26 05:28:12.247007688 +0000 UTC m=+150.430059274" lastFinishedPulling="2025-11-26 05:29:00.225836838 +0000 UTC m=+198.408888434" observedRunningTime="2025-11-26 05:29:00.804786662 +0000 UTC m=+198.987838248" watchObservedRunningTime="2025-11-26 05:29:00.807735892 +0000 UTC m=+198.990787488" Nov 26 05:29:00 crc kubenswrapper[4871]: I1126 05:29:00.920631 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vx568" Nov 26 05:29:00 crc kubenswrapper[4871]: I1126 05:29:00.920678 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vx568" Nov 26 05:29:01 crc kubenswrapper[4871]: I1126 05:29:01.154604 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:29:01 crc kubenswrapper[4871]: I1126 05:29:01.331081 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:29:01 crc kubenswrapper[4871]: I1126 05:29:01.331126 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:29:01 crc kubenswrapper[4871]: I1126 05:29:01.780196 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpxnm" event={"ID":"40308fed-3d95-4693-9efd-e44e891eb454","Type":"ContainerStarted","Data":"493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb"} Nov 26 05:29:01 crc kubenswrapper[4871]: I1126 05:29:01.979891 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-vx568" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerName="registry-server" probeResult="failure" output=< Nov 26 05:29:01 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s Nov 26 05:29:01 crc kubenswrapper[4871]: > Nov 26 05:29:02 crc kubenswrapper[4871]: I1126 05:29:02.406642 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-hlfdn" podUID="4776b484-dfbf-4547-9409-e2317c476932" containerName="registry-server" probeResult="failure" output=< Nov 26 05:29:02 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s Nov 26 05:29:02 crc kubenswrapper[4871]: > Nov 26 05:29:02 crc kubenswrapper[4871]: I1126 05:29:02.787759 4871 generic.go:334] "Generic (PLEG): container finished" podID="13a2fbc4-3140-412d-b990-9398453dc21c" containerID="feea5d8d7bf7aac13b172362a82a64dafac1eb2f39d6a5add028ef2ca7297ec3" exitCode=0 Nov 26 05:29:02 crc kubenswrapper[4871]: I1126 05:29:02.787838 
4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7n98c" event={"ID":"13a2fbc4-3140-412d-b990-9398453dc21c","Type":"ContainerDied","Data":"feea5d8d7bf7aac13b172362a82a64dafac1eb2f39d6a5add028ef2ca7297ec3"} Nov 26 05:29:02 crc kubenswrapper[4871]: I1126 05:29:02.798339 4871 generic.go:334] "Generic (PLEG): container finished" podID="40308fed-3d95-4693-9efd-e44e891eb454" containerID="493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb" exitCode=0 Nov 26 05:29:02 crc kubenswrapper[4871]: I1126 05:29:02.798619 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpxnm" event={"ID":"40308fed-3d95-4693-9efd-e44e891eb454","Type":"ContainerDied","Data":"493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb"} Nov 26 05:29:02 crc kubenswrapper[4871]: I1126 05:29:02.800866 4871 generic.go:334] "Generic (PLEG): container finished" podID="28cb4585-570c-471f-81b7-df6b52ccda23" containerID="5ed1aed4084aa1c0cd68e980370290e5c0109ed63389aa20501a7dc35ccd185e" exitCode=0 Nov 26 05:29:02 crc kubenswrapper[4871]: I1126 05:29:02.800909 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnc7b" event={"ID":"28cb4585-570c-471f-81b7-df6b52ccda23","Type":"ContainerDied","Data":"5ed1aed4084aa1c0cd68e980370290e5c0109ed63389aa20501a7dc35ccd185e"} Nov 26 05:29:03 crc kubenswrapper[4871]: I1126 05:29:03.808282 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpxnm" event={"ID":"40308fed-3d95-4693-9efd-e44e891eb454","Type":"ContainerStarted","Data":"ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28"} Nov 26 05:29:03 crc kubenswrapper[4871]: I1126 05:29:03.810839 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnc7b" event={"ID":"28cb4585-570c-471f-81b7-df6b52ccda23","Type":"ContainerStarted","Data":"504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de"} Nov 26 05:29:03 crc kubenswrapper[4871]: I1126 05:29:03.812981 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7n98c" event={"ID":"13a2fbc4-3140-412d-b990-9398453dc21c","Type":"ContainerStarted","Data":"4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5"} Nov 26 05:29:03 crc kubenswrapper[4871]: I1126 05:29:03.829842 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xpxnm" podStartSLOduration=2.9634293339999997 podStartE2EDuration="50.829817688s" podCreationTimestamp="2025-11-26 05:28:13 +0000 UTC" firstStartedPulling="2025-11-26 05:28:15.386670768 +0000 UTC m=+153.569722354" lastFinishedPulling="2025-11-26 05:29:03.253059082 +0000 UTC m=+201.436110708" observedRunningTime="2025-11-26 05:29:03.82583154 +0000 UTC m=+202.008883126" watchObservedRunningTime="2025-11-26 05:29:03.829817688 +0000 UTC m=+202.012869274" Nov 26 05:29:03 crc kubenswrapper[4871]: I1126 05:29:03.842847 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:29:03 crc kubenswrapper[4871]: I1126 05:29:03.842897 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:29:03 crc kubenswrapper[4871]: I1126 05:29:03.863717 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-marketplace-7n98c" podStartSLOduration=2.960607712 podStartE2EDuration="51.863702867s" podCreationTimestamp="2025-11-26 05:28:12 +0000 UTC" firstStartedPulling="2025-11-26 05:28:14.366964688 +0000 UTC m=+152.550016274" lastFinishedPulling="2025-11-26 05:29:03.270059803 +0000 UTC m=+201.453111429" observedRunningTime="2025-11-26 05:29:03.862030021 +0000 UTC m=+202.045081607" watchObservedRunningTime="2025-11-26 05:29:03.863702867 +0000 UTC m=+202.046754453" Nov 26 05:29:03 crc kubenswrapper[4871]: I1126 05:29:03.886071 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bnc7b" podStartSLOduration=2.946333434 podStartE2EDuration="51.886055222s" podCreationTimestamp="2025-11-26 05:28:12 +0000 UTC" firstStartedPulling="2025-11-26 05:28:14.367120931 +0000 UTC m=+152.550172517" lastFinishedPulling="2025-11-26 05:29:03.306842709 +0000 UTC m=+201.489894305" observedRunningTime="2025-11-26 05:29:03.885519998 +0000 UTC m=+202.068571584" watchObservedRunningTime="2025-11-26 05:29:03.886055222 +0000 UTC m=+202.069106808" Nov 26 05:29:04 crc kubenswrapper[4871]: I1126 05:29:04.244051 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:29:04 crc kubenswrapper[4871]: I1126 05:29:04.244307 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:29:04 crc kubenswrapper[4871]: I1126 05:29:04.538704 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6c56g"] Nov 26 05:29:04 crc kubenswrapper[4871]: I1126 05:29:04.538973 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6c56g" podUID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" containerName="registry-server" containerID="cri-o://933ec83cf79ef43e686c9c96b05d22bc9f852b5199807490c99d1af2f39dd585" gracePeriod=2 Nov 26 05:29:04 crc kubenswrapper[4871]: I1126 05:29:04.879643 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xpxnm" podUID="40308fed-3d95-4693-9efd-e44e891eb454" containerName="registry-server" probeResult="failure" output=< Nov 26 05:29:04 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s Nov 26 05:29:04 crc kubenswrapper[4871]: > Nov 26 05:29:05 crc kubenswrapper[4871]: I1126 05:29:05.285896 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-znrdp" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerName="registry-server" probeResult="failure" output=< Nov 26 05:29:05 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s Nov 26 05:29:05 crc kubenswrapper[4871]: > Nov 26 05:29:05 crc kubenswrapper[4871]: I1126 05:29:05.831468 4871 generic.go:334] "Generic (PLEG): container finished" podID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" containerID="933ec83cf79ef43e686c9c96b05d22bc9f852b5199807490c99d1af2f39dd585" exitCode=0 Nov 26 05:29:05 crc kubenswrapper[4871]: I1126 05:29:05.831590 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6c56g" event={"ID":"f7d5d2e5-f16d-4653-b6d4-4c5c75501655","Type":"ContainerDied","Data":"933ec83cf79ef43e686c9c96b05d22bc9f852b5199807490c99d1af2f39dd585"} Nov 26 05:29:05 crc kubenswrapper[4871]: I1126 05:29:05.969108 4871 util.go:48] "No ready sandbox for 
pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.130599 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-utilities\") pod \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.130659 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-catalog-content\") pod \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.130707 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjhkp\" (UniqueName: \"kubernetes.io/projected/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-kube-api-access-zjhkp\") pod \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\" (UID: \"f7d5d2e5-f16d-4653-b6d4-4c5c75501655\") " Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.132583 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-utilities" (OuterVolumeSpecName: "utilities") pod "f7d5d2e5-f16d-4653-b6d4-4c5c75501655" (UID: "f7d5d2e5-f16d-4653-b6d4-4c5c75501655"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.138599 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-kube-api-access-zjhkp" (OuterVolumeSpecName: "kube-api-access-zjhkp") pod "f7d5d2e5-f16d-4653-b6d4-4c5c75501655" (UID: "f7d5d2e5-f16d-4653-b6d4-4c5c75501655"). InnerVolumeSpecName "kube-api-access-zjhkp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.184815 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7d5d2e5-f16d-4653-b6d4-4c5c75501655" (UID: "f7d5d2e5-f16d-4653-b6d4-4c5c75501655"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.232451 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjhkp\" (UniqueName: \"kubernetes.io/projected/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-kube-api-access-zjhkp\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.233402 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.233540 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7d5d2e5-f16d-4653-b6d4-4c5c75501655-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.842689 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6c56g" event={"ID":"f7d5d2e5-f16d-4653-b6d4-4c5c75501655","Type":"ContainerDied","Data":"629b45c7b3f7e8fb01db38e58707a1404f26ae53bfded4457b1e833162d4847e"} Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.842792 4871 scope.go:117] "RemoveContainer" containerID="933ec83cf79ef43e686c9c96b05d22bc9f852b5199807490c99d1af2f39dd585" Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.843026 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6c56g" Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.865605 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6c56g"] Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.870713 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6c56g"] Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.879003 4871 scope.go:117] "RemoveContainer" containerID="7c927bd78eddbbca0fa83481dfcc226e88f48873bc33f8295cb3fb2d2ab4f57c" Nov 26 05:29:06 crc kubenswrapper[4871]: I1126 05:29:06.895707 4871 scope.go:117] "RemoveContainer" containerID="db3971f53a06f1e57913083895a11b4cd09b7cdc0bbbad78ae3e62f9b561c173" Nov 26 05:29:08 crc kubenswrapper[4871]: I1126 05:29:08.519979 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" path="/var/lib/kubelet/pods/f7d5d2e5-f16d-4653-b6d4-4c5c75501655/volumes" Nov 26 05:29:10 crc kubenswrapper[4871]: I1126 05:29:10.977089 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vx568" Nov 26 05:29:11 crc kubenswrapper[4871]: I1126 05:29:11.020854 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vx568" Nov 26 05:29:11 crc kubenswrapper[4871]: I1126 05:29:11.390127 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:29:11 crc kubenswrapper[4871]: I1126 05:29:11.456376 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:29:11 crc kubenswrapper[4871]: I1126 05:29:11.941507 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hlfdn"] Nov 26 05:29:12 crc kubenswrapper[4871]: I1126 05:29:12.848705 4871 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:29:12 crc kubenswrapper[4871]: I1126 05:29:12.848787 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:29:12 crc kubenswrapper[4871]: I1126 05:29:12.893385 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hlfdn" podUID="4776b484-dfbf-4547-9409-e2317c476932" containerName="registry-server" containerID="cri-o://1e4198ab9938de19748c2cace86ef43087618a568d2ef1e3290c3c62df7787ad" gracePeriod=2 Nov 26 05:29:12 crc kubenswrapper[4871]: I1126 05:29:12.923680 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:29:12 crc kubenswrapper[4871]: I1126 05:29:12.983460 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:29:13 crc kubenswrapper[4871]: I1126 05:29:13.240446 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:29:13 crc kubenswrapper[4871]: I1126 05:29:13.241840 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:29:13 crc kubenswrapper[4871]: I1126 05:29:13.315686 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:29:13 crc kubenswrapper[4871]: I1126 05:29:13.884050 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:29:13 crc kubenswrapper[4871]: I1126 05:29:13.903242 4871 generic.go:334] "Generic (PLEG): container finished" podID="4776b484-dfbf-4547-9409-e2317c476932" containerID="1e4198ab9938de19748c2cace86ef43087618a568d2ef1e3290c3c62df7787ad" exitCode=0 Nov 26 05:29:13 crc kubenswrapper[4871]: I1126 05:29:13.904626 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hlfdn" event={"ID":"4776b484-dfbf-4547-9409-e2317c476932","Type":"ContainerDied","Data":"1e4198ab9938de19748c2cace86ef43087618a568d2ef1e3290c3c62df7787ad"} Nov 26 05:29:13 crc kubenswrapper[4871]: I1126 05:29:13.930808 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:29:13 crc kubenswrapper[4871]: I1126 05:29:13.950016 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:29:13 crc kubenswrapper[4871]: I1126 05:29:13.984673 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.156324 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-catalog-content\") pod \"4776b484-dfbf-4547-9409-e2317c476932\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.156558 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-utilities\") pod \"4776b484-dfbf-4547-9409-e2317c476932\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.156674 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8bx4\" (UniqueName: \"kubernetes.io/projected/4776b484-dfbf-4547-9409-e2317c476932-kube-api-access-q8bx4\") pod \"4776b484-dfbf-4547-9409-e2317c476932\" (UID: \"4776b484-dfbf-4547-9409-e2317c476932\") " Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.158032 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-utilities" (OuterVolumeSpecName: "utilities") pod "4776b484-dfbf-4547-9409-e2317c476932" (UID: "4776b484-dfbf-4547-9409-e2317c476932"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.164877 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4776b484-dfbf-4547-9409-e2317c476932-kube-api-access-q8bx4" (OuterVolumeSpecName: "kube-api-access-q8bx4") pod "4776b484-dfbf-4547-9409-e2317c476932" (UID: "4776b484-dfbf-4547-9409-e2317c476932"). InnerVolumeSpecName "kube-api-access-q8bx4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.209820 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4776b484-dfbf-4547-9409-e2317c476932" (UID: "4776b484-dfbf-4547-9409-e2317c476932"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.257760 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.257790 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8bx4\" (UniqueName: \"kubernetes.io/projected/4776b484-dfbf-4547-9409-e2317c476932-kube-api-access-q8bx4\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.257800 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4776b484-dfbf-4547-9409-e2317c476932-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.292952 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.346671 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.740848 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnc7b"] Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.914782 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hlfdn" event={"ID":"4776b484-dfbf-4547-9409-e2317c476932","Type":"ContainerDied","Data":"2b92944619582b9ee23f16ffd0d76613470594428bcc3267fdf4e539699017bc"} Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.914894 4871 scope.go:117] "RemoveContainer" containerID="1e4198ab9938de19748c2cace86ef43087618a568d2ef1e3290c3c62df7787ad" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.915248 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hlfdn" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.941253 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hlfdn"] Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.948405 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hlfdn"] Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.953182 4871 scope.go:117] "RemoveContainer" containerID="07f4eaca5aa84c51c7d255d93cc663cb5bf3712d5a814fd8bc4b967c888f6ea9" Nov 26 05:29:14 crc kubenswrapper[4871]: I1126 05:29:14.981854 4871 scope.go:117] "RemoveContainer" containerID="419bbfdf8818640e744c2c9d9e3a102b22d51e6af93b2f2a8b8edfa9a51b59af" Nov 26 05:29:15 crc kubenswrapper[4871]: I1126 05:29:15.924675 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bnc7b" podUID="28cb4585-570c-471f-81b7-df6b52ccda23" containerName="registry-server" containerID="cri-o://504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de" gracePeriod=2 Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.145459 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-znrdp"] Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.146167 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-znrdp" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerName="registry-server" containerID="cri-o://cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b" gracePeriod=2 Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.368099 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.489274 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-utilities\") pod \"28cb4585-570c-471f-81b7-df6b52ccda23\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.489379 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-catalog-content\") pod \"28cb4585-570c-471f-81b7-df6b52ccda23\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.489433 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2ghg\" (UniqueName: \"kubernetes.io/projected/28cb4585-570c-471f-81b7-df6b52ccda23-kube-api-access-f2ghg\") pod \"28cb4585-570c-471f-81b7-df6b52ccda23\" (UID: \"28cb4585-570c-471f-81b7-df6b52ccda23\") " Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.508336 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "28cb4585-570c-471f-81b7-df6b52ccda23" (UID: "28cb4585-570c-471f-81b7-df6b52ccda23"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.508836 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28cb4585-570c-471f-81b7-df6b52ccda23-kube-api-access-f2ghg" (OuterVolumeSpecName: "kube-api-access-f2ghg") pod "28cb4585-570c-471f-81b7-df6b52ccda23" (UID: "28cb4585-570c-471f-81b7-df6b52ccda23"). InnerVolumeSpecName "kube-api-access-f2ghg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.513084 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-utilities" (OuterVolumeSpecName: "utilities") pod "28cb4585-570c-471f-81b7-df6b52ccda23" (UID: "28cb4585-570c-471f-81b7-df6b52ccda23"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.524027 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4776b484-dfbf-4547-9409-e2317c476932" path="/var/lib/kubelet/pods/4776b484-dfbf-4547-9409-e2317c476932/volumes" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.527486 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.591888 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2ghg\" (UniqueName: \"kubernetes.io/projected/28cb4585-570c-471f-81b7-df6b52ccda23-kube-api-access-f2ghg\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.591962 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.591976 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28cb4585-570c-471f-81b7-df6b52ccda23-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.693271 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-catalog-content\") pod \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.693397 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-utilities\") pod \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.693440 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nxx6\" (UniqueName: \"kubernetes.io/projected/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-kube-api-access-7nxx6\") pod \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\" (UID: \"dcd2270a-9f70-4882-89f6-b916ef8fcc5d\") " Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.694739 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-utilities" (OuterVolumeSpecName: "utilities") pod 
"dcd2270a-9f70-4882-89f6-b916ef8fcc5d" (UID: "dcd2270a-9f70-4882-89f6-b916ef8fcc5d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.696719 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-kube-api-access-7nxx6" (OuterVolumeSpecName: "kube-api-access-7nxx6") pod "dcd2270a-9f70-4882-89f6-b916ef8fcc5d" (UID: "dcd2270a-9f70-4882-89f6-b916ef8fcc5d"). InnerVolumeSpecName "kube-api-access-7nxx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.788336 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dcd2270a-9f70-4882-89f6-b916ef8fcc5d" (UID: "dcd2270a-9f70-4882-89f6-b916ef8fcc5d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.794748 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.794794 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nxx6\" (UniqueName: \"kubernetes.io/projected/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-kube-api-access-7nxx6\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.794810 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dcd2270a-9f70-4882-89f6-b916ef8fcc5d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.930062 4871 generic.go:334] "Generic (PLEG): container finished" podID="28cb4585-570c-471f-81b7-df6b52ccda23" containerID="504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de" exitCode=0 Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.930126 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnc7b" event={"ID":"28cb4585-570c-471f-81b7-df6b52ccda23","Type":"ContainerDied","Data":"504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de"} Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.930152 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bnc7b" event={"ID":"28cb4585-570c-471f-81b7-df6b52ccda23","Type":"ContainerDied","Data":"404a68db30f5ab86be4b54c0e274e8866c432b861dee49198911a3f68a59fc76"} Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.930163 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bnc7b" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.930169 4871 scope.go:117] "RemoveContainer" containerID="504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.932143 4871 generic.go:334] "Generic (PLEG): container finished" podID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerID="cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b" exitCode=0 Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.932172 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znrdp" event={"ID":"dcd2270a-9f70-4882-89f6-b916ef8fcc5d","Type":"ContainerDied","Data":"cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b"} Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.932190 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-znrdp" event={"ID":"dcd2270a-9f70-4882-89f6-b916ef8fcc5d","Type":"ContainerDied","Data":"b056db94caf86d1b4159aae9160fda348949c491f841a7d5679c97402266064d"} Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.932226 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-znrdp" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.947155 4871 scope.go:117] "RemoveContainer" containerID="5ed1aed4084aa1c0cd68e980370290e5c0109ed63389aa20501a7dc35ccd185e" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.949251 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnc7b"] Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.953665 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bnc7b"] Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.962691 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-znrdp"] Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.966402 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-znrdp"] Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.968494 4871 scope.go:117] "RemoveContainer" containerID="9d235161770cbfa40eea52471de4cc6d178b7991da072b81699daf473a3b9fd5" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.980573 4871 scope.go:117] "RemoveContainer" containerID="504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de" Nov 26 05:29:16 crc kubenswrapper[4871]: E1126 05:29:16.981063 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de\": container with ID starting with 504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de not found: ID does not exist" containerID="504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.981111 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de"} err="failed to get container status \"504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de\": rpc error: code = NotFound desc = could not find container \"504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de\": container with ID starting with 
504a0577340339585cf75c8f8c4b45db18aae2d1860aa700c531a1671d10d7de not found: ID does not exist" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.981161 4871 scope.go:117] "RemoveContainer" containerID="5ed1aed4084aa1c0cd68e980370290e5c0109ed63389aa20501a7dc35ccd185e" Nov 26 05:29:16 crc kubenswrapper[4871]: E1126 05:29:16.981482 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ed1aed4084aa1c0cd68e980370290e5c0109ed63389aa20501a7dc35ccd185e\": container with ID starting with 5ed1aed4084aa1c0cd68e980370290e5c0109ed63389aa20501a7dc35ccd185e not found: ID does not exist" containerID="5ed1aed4084aa1c0cd68e980370290e5c0109ed63389aa20501a7dc35ccd185e" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.981724 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ed1aed4084aa1c0cd68e980370290e5c0109ed63389aa20501a7dc35ccd185e"} err="failed to get container status \"5ed1aed4084aa1c0cd68e980370290e5c0109ed63389aa20501a7dc35ccd185e\": rpc error: code = NotFound desc = could not find container \"5ed1aed4084aa1c0cd68e980370290e5c0109ed63389aa20501a7dc35ccd185e\": container with ID starting with 5ed1aed4084aa1c0cd68e980370290e5c0109ed63389aa20501a7dc35ccd185e not found: ID does not exist" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.981804 4871 scope.go:117] "RemoveContainer" containerID="9d235161770cbfa40eea52471de4cc6d178b7991da072b81699daf473a3b9fd5" Nov 26 05:29:16 crc kubenswrapper[4871]: E1126 05:29:16.982088 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d235161770cbfa40eea52471de4cc6d178b7991da072b81699daf473a3b9fd5\": container with ID starting with 9d235161770cbfa40eea52471de4cc6d178b7991da072b81699daf473a3b9fd5 not found: ID does not exist" containerID="9d235161770cbfa40eea52471de4cc6d178b7991da072b81699daf473a3b9fd5" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.982162 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d235161770cbfa40eea52471de4cc6d178b7991da072b81699daf473a3b9fd5"} err="failed to get container status \"9d235161770cbfa40eea52471de4cc6d178b7991da072b81699daf473a3b9fd5\": rpc error: code = NotFound desc = could not find container \"9d235161770cbfa40eea52471de4cc6d178b7991da072b81699daf473a3b9fd5\": container with ID starting with 9d235161770cbfa40eea52471de4cc6d178b7991da072b81699daf473a3b9fd5 not found: ID does not exist" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.982227 4871 scope.go:117] "RemoveContainer" containerID="cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b" Nov 26 05:29:16 crc kubenswrapper[4871]: I1126 05:29:16.991954 4871 scope.go:117] "RemoveContainer" containerID="b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69" Nov 26 05:29:17 crc kubenswrapper[4871]: I1126 05:29:17.003998 4871 scope.go:117] "RemoveContainer" containerID="4cad2e98ed79003deaa126a4270cdb2c079f14ba18c40de718d12af2d5650e9f" Nov 26 05:29:17 crc kubenswrapper[4871]: I1126 05:29:17.016772 4871 scope.go:117] "RemoveContainer" containerID="cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b" Nov 26 05:29:17 crc kubenswrapper[4871]: E1126 05:29:17.017027 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b\": container 
with ID starting with cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b not found: ID does not exist" containerID="cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b" Nov 26 05:29:17 crc kubenswrapper[4871]: I1126 05:29:17.017055 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b"} err="failed to get container status \"cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b\": rpc error: code = NotFound desc = could not find container \"cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b\": container with ID starting with cdf16438ccfbac067ba5096fe957db52c5054fc78375bc8a4874fd98d6b24f0b not found: ID does not exist" Nov 26 05:29:17 crc kubenswrapper[4871]: I1126 05:29:17.017073 4871 scope.go:117] "RemoveContainer" containerID="b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69" Nov 26 05:29:17 crc kubenswrapper[4871]: E1126 05:29:17.018379 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69\": container with ID starting with b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69 not found: ID does not exist" containerID="b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69" Nov 26 05:29:17 crc kubenswrapper[4871]: I1126 05:29:17.018401 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69"} err="failed to get container status \"b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69\": rpc error: code = NotFound desc = could not find container \"b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69\": container with ID starting with b6f60725213f1fcfa8aebe72ac82ff598e66f296827607448877cc025950ad69 not found: ID does not exist" Nov 26 05:29:17 crc kubenswrapper[4871]: I1126 05:29:17.018413 4871 scope.go:117] "RemoveContainer" containerID="4cad2e98ed79003deaa126a4270cdb2c079f14ba18c40de718d12af2d5650e9f" Nov 26 05:29:17 crc kubenswrapper[4871]: E1126 05:29:17.018781 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cad2e98ed79003deaa126a4270cdb2c079f14ba18c40de718d12af2d5650e9f\": container with ID starting with 4cad2e98ed79003deaa126a4270cdb2c079f14ba18c40de718d12af2d5650e9f not found: ID does not exist" containerID="4cad2e98ed79003deaa126a4270cdb2c079f14ba18c40de718d12af2d5650e9f" Nov 26 05:29:17 crc kubenswrapper[4871]: I1126 05:29:17.018797 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cad2e98ed79003deaa126a4270cdb2c079f14ba18c40de718d12af2d5650e9f"} err="failed to get container status \"4cad2e98ed79003deaa126a4270cdb2c079f14ba18c40de718d12af2d5650e9f\": rpc error: code = NotFound desc = could not find container \"4cad2e98ed79003deaa126a4270cdb2c079f14ba18c40de718d12af2d5650e9f\": container with ID starting with 4cad2e98ed79003deaa126a4270cdb2c079f14ba18c40de718d12af2d5650e9f not found: ID does not exist" Nov 26 05:29:18 crc kubenswrapper[4871]: I1126 05:29:18.517011 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28cb4585-570c-471f-81b7-df6b52ccda23" path="/var/lib/kubelet/pods/28cb4585-570c-471f-81b7-df6b52ccda23/volumes" Nov 26 05:29:18 crc kubenswrapper[4871]: 
Nov 26 05:29:18 crc kubenswrapper[4871]: I1126 05:29:18.518211 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" path="/var/lib/kubelet/pods/dcd2270a-9f70-4882-89f6-b916ef8fcc5d/volumes"
Nov 26 05:29:22 crc kubenswrapper[4871]: I1126 05:29:22.676603 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pz8qb"]
Nov 26 05:29:23 crc kubenswrapper[4871]: I1126 05:29:23.614766 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 05:29:23 crc kubenswrapper[4871]: I1126 05:29:23.615190 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 05:29:23 crc kubenswrapper[4871]: I1126 05:29:23.615372 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2"
Nov 26 05:29:23 crc kubenswrapper[4871]: I1126 05:29:23.616074 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 05:29:23 crc kubenswrapper[4871]: I1126 05:29:23.616283 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417" gracePeriod=600
Nov 26 05:29:23 crc kubenswrapper[4871]: I1126 05:29:23.988770 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417" exitCode=0
Nov 26 05:29:23 crc kubenswrapper[4871]: I1126 05:29:23.988905 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417"}
Nov 26 05:29:23 crc kubenswrapper[4871]: I1126 05:29:23.989302 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"a526a4481d162e6a9e1a274d55add2a702076a153538d8c5c161152ee4344647"}
Nov 26 05:29:47 crc kubenswrapper[4871]: I1126 05:29:47.718745 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" podUID="86ec5766-3ccf-487f-97bf-f0dda4f06b0e" containerName="oauth-openshift" containerID="cri-o://9a8bd9ec524d6d172b911cd75905952ca08aadc1705cdeb4c3d7d4c7706b7fc4" gracePeriod=15
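
The liveness failure above is a plain HTTP GET against http://127.0.0.1:8798/health that could not connect; repeated failures mark the container unhealthy and trigger the restart recorded next. A self-contained sketch of that check, with the timeout and a failureThreshold of 3 (the Kubernetes default) assumed rather than read from the pod spec:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe performs one HTTP liveness check: any transport error (such as
// "connection refused") or a status outside [200,400) counts as a failure,
// matching how the kubelet judges HTTP probes.
func probe(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. dial tcp 127.0.0.1:8798: connect: connection refused
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	const failureThreshold = 3 // assumed default; the real value comes from the probe spec
	failures := 0
	for i := 0; i < failureThreshold; i++ {
		if err := probe("http://127.0.0.1:8798/health", time.Second); err != nil {
			failures++
			fmt.Println("probe failed:", err)
		} else {
			failures = 0
		}
	}
	if failures >= failureThreshold {
		fmt.Println("liveness failed; container would be killed and restarted")
	}
}
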
generic.go:334] "Generic (PLEG): container finished" podID="86ec5766-3ccf-487f-97bf-f0dda4f06b0e" containerID="9a8bd9ec524d6d172b911cd75905952ca08aadc1705cdeb4c3d7d4c7706b7fc4" exitCode=0 Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.139907 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" event={"ID":"86ec5766-3ccf-487f-97bf-f0dda4f06b0e","Type":"ContainerDied","Data":"9a8bd9ec524d6d172b911cd75905952ca08aadc1705cdeb4c3d7d4c7706b7fc4"} Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.140046 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" event={"ID":"86ec5766-3ccf-487f-97bf-f0dda4f06b0e","Type":"ContainerDied","Data":"7e91dee06efc83b6571d19ab884b1d17b16aaf4f160555be8e410a469ada9302"} Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.140063 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e91dee06efc83b6571d19ab884b1d17b16aaf4f160555be8e410a469ada9302" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.148734 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.203485 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-d878cb77-b7nhs"] Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.203829 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4776b484-dfbf-4547-9409-e2317c476932" containerName="extract-content" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.203849 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="4776b484-dfbf-4547-9409-e2317c476932" containerName="extract-content" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.203871 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28cb4585-570c-471f-81b7-df6b52ccda23" containerName="extract-utilities" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.203883 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="28cb4585-570c-471f-81b7-df6b52ccda23" containerName="extract-utilities" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.203904 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86ec5766-3ccf-487f-97bf-f0dda4f06b0e" containerName="oauth-openshift" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.203916 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="86ec5766-3ccf-487f-97bf-f0dda4f06b0e" containerName="oauth-openshift" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.203938 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.203950 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.203973 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28cb4585-570c-471f-81b7-df6b52ccda23" containerName="extract-content" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.203984 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="28cb4585-570c-471f-81b7-df6b52ccda23" containerName="extract-content" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.204000 4871 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" containerName="extract-utilities" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204013 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" containerName="extract-utilities" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.204030 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4776b484-dfbf-4547-9409-e2317c476932" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204042 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="4776b484-dfbf-4547-9409-e2317c476932" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.204058 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerName="extract-utilities" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204070 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerName="extract-utilities" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.204083 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" containerName="extract-content" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204094 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" containerName="extract-content" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.204116 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerName="extract-content" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204127 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerName="extract-content" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.204147 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204159 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.204175 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4776b484-dfbf-4547-9409-e2317c476932" containerName="extract-utilities" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204222 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="4776b484-dfbf-4547-9409-e2317c476932" containerName="extract-utilities" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.204236 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28cb4585-570c-471f-81b7-df6b52ccda23" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204248 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="28cb4585-570c-471f-81b7-df6b52ccda23" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: E1126 05:29:48.204263 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ced5062-5584-44c8-96aa-b98c22d90ef6" containerName="pruner" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204275 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ced5062-5584-44c8-96aa-b98c22d90ef6" containerName="pruner" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204430 4871 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="2ced5062-5584-44c8-96aa-b98c22d90ef6" containerName="pruner" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204452 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7d5d2e5-f16d-4653-b6d4-4c5c75501655" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204471 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="28cb4585-570c-471f-81b7-df6b52ccda23" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204499 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcd2270a-9f70-4882-89f6-b916ef8fcc5d" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204553 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="86ec5766-3ccf-487f-97bf-f0dda4f06b0e" containerName="oauth-openshift" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.204570 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="4776b484-dfbf-4547-9409-e2317c476932" containerName="registry-server" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.205308 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.215764 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-d878cb77-b7nhs"] Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.245483 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-dir\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.245598 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-router-certs\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.245666 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-policies\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.245704 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-idp-0-file-data\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.245735 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-session\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.245774 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-669w4\" (UniqueName: 
\"kubernetes.io/projected/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-kube-api-access-669w4\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.245842 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-login\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.245889 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-cliconfig\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.245956 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-service-ca\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.245996 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-trusted-ca-bundle\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.246027 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-error\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.246062 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-provider-selection\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.246096 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-serving-cert\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.246134 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-ocp-branding-template\") pod \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\" (UID: \"86ec5766-3ccf-487f-97bf-f0dda4f06b0e\") " Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.252175 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-policies" 
(OuterVolumeSpecName: "audit-policies") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.252232 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.255783 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.256022 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.256363 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.260131 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.263025 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.263187 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-kube-api-access-669w4" (OuterVolumeSpecName: "kube-api-access-669w4") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "kube-api-access-669w4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.264292 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.264856 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.265062 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.265488 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.265797 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.265943 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "86ec5766-3ccf-487f-97bf-f0dda4f06b0e" (UID: "86ec5766-3ccf-487f-97bf-f0dda4f06b0e"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.348045 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/88887f5d-afec-4b24-80d9-70672d56dae4-audit-dir\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.348112 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.348145 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.348293 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.348377 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.348427 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.348459 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrnhw\" (UniqueName: \"kubernetes.io/projected/88887f5d-afec-4b24-80d9-70672d56dae4-kube-api-access-nrnhw\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.348499 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.348593 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-template-login\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.348704 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-session\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.348878 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-audit-policies\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349023 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-template-error\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349085 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-service-ca\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349153 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-router-certs\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349283 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349319 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" 
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349341 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349364 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349384 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349404 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349422 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349443 4871 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349464 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349482 4871 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349500 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349519 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349564 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-669w4\" (UniqueName: \"kubernetes.io/projected/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-kube-api-access-669w4\") on node \"crc\" DevicePath \"\"" Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.349584 4871 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/86ec5766-3ccf-487f-97bf-f0dda4f06b0e-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 
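
Everything from the UnmountVolume lines through the Volume detached lines is the volume reconciler converging actual state on desired state: the deleted pod's volumes leave the desired world and are unmounted and torn down, while the new pod's volumes are verified, attached, and mounted. A compressed sketch of that loop, using string sets in place of the real desired/actual state caches (an assumption for illustration only):

package main

import "fmt"

// reconcile brings the actual set of mounted volumes in line with the
// desired set: unmount what is no longer desired, mount what is missing.
func reconcile(desired, actual map[string]bool) {
	for vol := range actual {
		if !desired[vol] {
			fmt.Println("operationExecutor.UnmountVolume started for volume", vol)
			delete(actual, vol)
		}
	}
	for vol := range desired {
		if !actual[vol] {
			fmt.Println("operationExecutor.MountVolume started for volume", vol)
			actual[vol] = true
		}
	}
}

func main() {
	// Old pod's volume is still mounted; new pod's volume is desired.
	actual := map[string]bool{"86ec5766-.../v4-0-config-system-session": true}
	desired := map[string]bool{"88887f5d-.../v4-0-config-system-session": true}
	reconcile(desired, actual)
}
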
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451150 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-router-certs\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451266 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/88887f5d-afec-4b24-80d9-70672d56dae4-audit-dir\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451312 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451334 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/88887f5d-afec-4b24-80d9-70672d56dae4-audit-dir\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451359 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451394 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451432 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451474 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451504 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrnhw\" (UniqueName: \"kubernetes.io/projected/88887f5d-afec-4b24-80d9-70672d56dae4-kube-api-access-nrnhw\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451561 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451623 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-template-login\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451675 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-session\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451728 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-audit-policies\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451785 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-template-error\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.451817 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-service-ca\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.454265 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-audit-policies\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.455431 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-service-ca\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.455962 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.456952 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-session\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.457112 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-router-certs\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.457272 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.457547 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.457821 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.458175 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-cliconfig\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.459576 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-system-serving-cert\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.459948 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-template-error\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.462570 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/88887f5d-afec-4b24-80d9-70672d56dae4-v4-0-config-user-template-login\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.481823 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrnhw\" (UniqueName: \"kubernetes.io/projected/88887f5d-afec-4b24-80d9-70672d56dae4-kube-api-access-nrnhw\") pod \"oauth-openshift-d878cb77-b7nhs\" (UID: \"88887f5d-afec-4b24-80d9-70672d56dae4\") " pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.526826 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs"
Nov 26 05:29:48 crc kubenswrapper[4871]: I1126 05:29:48.820380 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-d878cb77-b7nhs"]
Nov 26 05:29:49 crc kubenswrapper[4871]: I1126 05:29:49.150019 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" event={"ID":"88887f5d-afec-4b24-80d9-70672d56dae4","Type":"ContainerStarted","Data":"3974aa30394767916580c00b0de4a33027286ea5ea20e83dd10f2c1b801077ae"}
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pz8qb" Nov 26 05:29:49 crc kubenswrapper[4871]: I1126 05:29:49.182870 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pz8qb"] Nov 26 05:29:49 crc kubenswrapper[4871]: I1126 05:29:49.187806 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pz8qb"] Nov 26 05:29:50 crc kubenswrapper[4871]: I1126 05:29:50.158398 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" event={"ID":"88887f5d-afec-4b24-80d9-70672d56dae4","Type":"ContainerStarted","Data":"e8066317a119c167aba7e9e67cf36d64ccfeb250d526c360233e87d8c17f07a4"} Nov 26 05:29:50 crc kubenswrapper[4871]: I1126 05:29:50.158823 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:50 crc kubenswrapper[4871]: I1126 05:29:50.167061 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" Nov 26 05:29:50 crc kubenswrapper[4871]: I1126 05:29:50.223719 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-d878cb77-b7nhs" podStartSLOduration=28.22368925 podStartE2EDuration="28.22368925s" podCreationTimestamp="2025-11-26 05:29:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:29:50.192465874 +0000 UTC m=+248.375517470" watchObservedRunningTime="2025-11-26 05:29:50.22368925 +0000 UTC m=+248.406740876" Nov 26 05:29:50 crc kubenswrapper[4871]: I1126 05:29:50.519697 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86ec5766-3ccf-487f-97bf-f0dda4f06b0e" path="/var/lib/kubelet/pods/86ec5766-3ccf-487f-97bf-f0dda4f06b0e/volumes" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.139928 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g"] Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.140996 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.144843 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.145332 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.149012 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g"] Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.307589 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-secret-volume\") pod \"collect-profiles-29402250-l2k2g\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.307633 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-config-volume\") pod \"collect-profiles-29402250-l2k2g\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.307755 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjkl9\" (UniqueName: \"kubernetes.io/projected/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-kube-api-access-tjkl9\") pod \"collect-profiles-29402250-l2k2g\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.408915 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjkl9\" (UniqueName: \"kubernetes.io/projected/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-kube-api-access-tjkl9\") pod \"collect-profiles-29402250-l2k2g\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.408995 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-secret-volume\") pod \"collect-profiles-29402250-l2k2g\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.409021 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-config-volume\") pod \"collect-profiles-29402250-l2k2g\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.410242 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-config-volume\") pod 
\"collect-profiles-29402250-l2k2g\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.420190 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-secret-volume\") pod \"collect-profiles-29402250-l2k2g\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.432093 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjkl9\" (UniqueName: \"kubernetes.io/projected/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-kube-api-access-tjkl9\") pod \"collect-profiles-29402250-l2k2g\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.493548 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:00 crc kubenswrapper[4871]: I1126 05:30:00.689815 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g"] Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.141750 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z482k"] Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.143155 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z482k" podUID="b042c85e-dbcc-488a-afca-602eadb2a09a" containerName="registry-server" containerID="cri-o://1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730" gracePeriod=30 Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.158655 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vx568"] Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.158951 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vx568" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerName="registry-server" containerID="cri-o://f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5" gracePeriod=30 Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.169224 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ff9xx"] Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.169514 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" podUID="e028ebf2-4eb3-477a-be5d-ce02dd655d8d" containerName="marketplace-operator" containerID="cri-o://cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521" gracePeriod=30 Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.178511 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7n98c"] Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.178890 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7n98c" podUID="13a2fbc4-3140-412d-b990-9398453dc21c" containerName="registry-server" 
containerID="cri-o://4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5" gracePeriod=30 Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.188228 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qks66"] Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.189102 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.192892 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xpxnm"] Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.193114 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xpxnm" podUID="40308fed-3d95-4693-9efd-e44e891eb454" containerName="registry-server" containerID="cri-o://ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28" gracePeriod=30 Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.197790 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qks66"] Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.224826 4871 generic.go:334] "Generic (PLEG): container finished" podID="ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6" containerID="92c517dee00d279c24d34ac425449d70bcbaf0cab40f8e1af01c92f60fe8525c" exitCode=0 Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.224876 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" event={"ID":"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6","Type":"ContainerDied","Data":"92c517dee00d279c24d34ac425449d70bcbaf0cab40f8e1af01c92f60fe8525c"} Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.224905 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" event={"ID":"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6","Type":"ContainerStarted","Data":"c330bf46468ac36588feae9273b03973090655d5fe96bfaebacd9f3516529741"} Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.330920 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/65ad1a09-cc57-45f2-9a13-2d83b8b8221c-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qks66\" (UID: \"65ad1a09-cc57-45f2-9a13-2d83b8b8221c\") " pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.331007 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zz7ss\" (UniqueName: \"kubernetes.io/projected/65ad1a09-cc57-45f2-9a13-2d83b8b8221c-kube-api-access-zz7ss\") pod \"marketplace-operator-79b997595-qks66\" (UID: \"65ad1a09-cc57-45f2-9a13-2d83b8b8221c\") " pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.331173 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/65ad1a09-cc57-45f2-9a13-2d83b8b8221c-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qks66\" (UID: \"65ad1a09-cc57-45f2-9a13-2d83b8b8221c\") " pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:01 crc 
kubenswrapper[4871]: I1126 05:30:01.432363 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/65ad1a09-cc57-45f2-9a13-2d83b8b8221c-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qks66\" (UID: \"65ad1a09-cc57-45f2-9a13-2d83b8b8221c\") " pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.432660 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/65ad1a09-cc57-45f2-9a13-2d83b8b8221c-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qks66\" (UID: \"65ad1a09-cc57-45f2-9a13-2d83b8b8221c\") " pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.432699 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zz7ss\" (UniqueName: \"kubernetes.io/projected/65ad1a09-cc57-45f2-9a13-2d83b8b8221c-kube-api-access-zz7ss\") pod \"marketplace-operator-79b997595-qks66\" (UID: \"65ad1a09-cc57-45f2-9a13-2d83b8b8221c\") " pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.433935 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/65ad1a09-cc57-45f2-9a13-2d83b8b8221c-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qks66\" (UID: \"65ad1a09-cc57-45f2-9a13-2d83b8b8221c\") " pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.440737 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/65ad1a09-cc57-45f2-9a13-2d83b8b8221c-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qks66\" (UID: \"65ad1a09-cc57-45f2-9a13-2d83b8b8221c\") " pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.451984 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zz7ss\" (UniqueName: \"kubernetes.io/projected/65ad1a09-cc57-45f2-9a13-2d83b8b8221c-kube-api-access-zz7ss\") pod \"marketplace-operator-79b997595-qks66\" (UID: \"65ad1a09-cc57-45f2-9a13-2d83b8b8221c\") " pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.609031 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.613557 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.622800 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vx568" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.622962 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.634867 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.636837 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.735963 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-operator-metrics\") pod \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736030 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-catalog-content\") pod \"13a2fbc4-3140-412d-b990-9398453dc21c\" (UID: \"13a2fbc4-3140-412d-b990-9398453dc21c\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736053 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgr7l\" (UniqueName: \"kubernetes.io/projected/b042c85e-dbcc-488a-afca-602eadb2a09a-kube-api-access-jgr7l\") pod \"b042c85e-dbcc-488a-afca-602eadb2a09a\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736080 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85wmz\" (UniqueName: \"kubernetes.io/projected/13a2fbc4-3140-412d-b990-9398453dc21c-kube-api-access-85wmz\") pod \"13a2fbc4-3140-412d-b990-9398453dc21c\" (UID: \"13a2fbc4-3140-412d-b990-9398453dc21c\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736107 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-utilities\") pod \"b042c85e-dbcc-488a-afca-602eadb2a09a\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736125 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-catalog-content\") pod \"20dfe3fa-af2d-4906-bc8c-21d863489308\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736143 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-catalog-content\") pod \"40308fed-3d95-4693-9efd-e44e891eb454\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736162 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-utilities\") pod \"40308fed-3d95-4693-9efd-e44e891eb454\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736186 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lhxd\" (UniqueName: \"kubernetes.io/projected/20dfe3fa-af2d-4906-bc8c-21d863489308-kube-api-access-7lhxd\") pod \"20dfe3fa-af2d-4906-bc8c-21d863489308\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " Nov 26 05:30:01 crc kubenswrapper[4871]: 
I1126 05:30:01.736217 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-utilities\") pod \"20dfe3fa-af2d-4906-bc8c-21d863489308\" (UID: \"20dfe3fa-af2d-4906-bc8c-21d863489308\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736244 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-utilities\") pod \"13a2fbc4-3140-412d-b990-9398453dc21c\" (UID: \"13a2fbc4-3140-412d-b990-9398453dc21c\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736262 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-trusted-ca\") pod \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736277 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6skp\" (UniqueName: \"kubernetes.io/projected/40308fed-3d95-4693-9efd-e44e891eb454-kube-api-access-c6skp\") pod \"40308fed-3d95-4693-9efd-e44e891eb454\" (UID: \"40308fed-3d95-4693-9efd-e44e891eb454\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736299 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-catalog-content\") pod \"b042c85e-dbcc-488a-afca-602eadb2a09a\" (UID: \"b042c85e-dbcc-488a-afca-602eadb2a09a\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.736331 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxtxk\" (UniqueName: \"kubernetes.io/projected/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-kube-api-access-fxtxk\") pod \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\" (UID: \"e028ebf2-4eb3-477a-be5d-ce02dd655d8d\") " Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.738736 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-utilities" (OuterVolumeSpecName: "utilities") pod "40308fed-3d95-4693-9efd-e44e891eb454" (UID: "40308fed-3d95-4693-9efd-e44e891eb454"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.739811 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-utilities" (OuterVolumeSpecName: "utilities") pod "20dfe3fa-af2d-4906-bc8c-21d863489308" (UID: "20dfe3fa-af2d-4906-bc8c-21d863489308"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.739933 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-utilities" (OuterVolumeSpecName: "utilities") pod "13a2fbc4-3140-412d-b990-9398453dc21c" (UID: "13a2fbc4-3140-412d-b990-9398453dc21c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.740040 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-utilities" (OuterVolumeSpecName: "utilities") pod "b042c85e-dbcc-488a-afca-602eadb2a09a" (UID: "b042c85e-dbcc-488a-afca-602eadb2a09a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.740675 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20dfe3fa-af2d-4906-bc8c-21d863489308-kube-api-access-7lhxd" (OuterVolumeSpecName: "kube-api-access-7lhxd") pod "20dfe3fa-af2d-4906-bc8c-21d863489308" (UID: "20dfe3fa-af2d-4906-bc8c-21d863489308"). InnerVolumeSpecName "kube-api-access-7lhxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.741942 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b042c85e-dbcc-488a-afca-602eadb2a09a-kube-api-access-jgr7l" (OuterVolumeSpecName: "kube-api-access-jgr7l") pod "b042c85e-dbcc-488a-afca-602eadb2a09a" (UID: "b042c85e-dbcc-488a-afca-602eadb2a09a"). InnerVolumeSpecName "kube-api-access-jgr7l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.741962 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40308fed-3d95-4693-9efd-e44e891eb454-kube-api-access-c6skp" (OuterVolumeSpecName: "kube-api-access-c6skp") pod "40308fed-3d95-4693-9efd-e44e891eb454" (UID: "40308fed-3d95-4693-9efd-e44e891eb454"). InnerVolumeSpecName "kube-api-access-c6skp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.742062 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "e028ebf2-4eb3-477a-be5d-ce02dd655d8d" (UID: "e028ebf2-4eb3-477a-be5d-ce02dd655d8d"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.742807 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-kube-api-access-fxtxk" (OuterVolumeSpecName: "kube-api-access-fxtxk") pod "e028ebf2-4eb3-477a-be5d-ce02dd655d8d" (UID: "e028ebf2-4eb3-477a-be5d-ce02dd655d8d"). InnerVolumeSpecName "kube-api-access-fxtxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.743880 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "e028ebf2-4eb3-477a-be5d-ce02dd655d8d" (UID: "e028ebf2-4eb3-477a-be5d-ce02dd655d8d"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.744686 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13a2fbc4-3140-412d-b990-9398453dc21c-kube-api-access-85wmz" (OuterVolumeSpecName: "kube-api-access-85wmz") pod "13a2fbc4-3140-412d-b990-9398453dc21c" (UID: "13a2fbc4-3140-412d-b990-9398453dc21c"). InnerVolumeSpecName "kube-api-access-85wmz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.775111 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "13a2fbc4-3140-412d-b990-9398453dc21c" (UID: "13a2fbc4-3140-412d-b990-9398453dc21c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.799691 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20dfe3fa-af2d-4906-bc8c-21d863489308" (UID: "20dfe3fa-af2d-4906-bc8c-21d863489308"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.810048 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b042c85e-dbcc-488a-afca-602eadb2a09a" (UID: "b042c85e-dbcc-488a-afca-602eadb2a09a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.837920 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxtxk\" (UniqueName: \"kubernetes.io/projected/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-kube-api-access-fxtxk\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.837950 4871 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.837962 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.837971 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgr7l\" (UniqueName: \"kubernetes.io/projected/b042c85e-dbcc-488a-afca-602eadb2a09a-kube-api-access-jgr7l\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.837979 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85wmz\" (UniqueName: \"kubernetes.io/projected/13a2fbc4-3140-412d-b990-9398453dc21c-kube-api-access-85wmz\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.837988 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.837998 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.838007 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.838016 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lhxd\" (UniqueName: \"kubernetes.io/projected/20dfe3fa-af2d-4906-bc8c-21d863489308-kube-api-access-7lhxd\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.838025 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20dfe3fa-af2d-4906-bc8c-21d863489308-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.838033 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13a2fbc4-3140-412d-b990-9398453dc21c-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.838042 4871 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e028ebf2-4eb3-477a-be5d-ce02dd655d8d-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.838051 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6skp\" (UniqueName: 
\"kubernetes.io/projected/40308fed-3d95-4693-9efd-e44e891eb454-kube-api-access-c6skp\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.838060 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b042c85e-dbcc-488a-afca-602eadb2a09a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.861636 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "40308fed-3d95-4693-9efd-e44e891eb454" (UID: "40308fed-3d95-4693-9efd-e44e891eb454"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:30:01 crc kubenswrapper[4871]: I1126 05:30:01.939049 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/40308fed-3d95-4693-9efd-e44e891eb454-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.017122 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qks66"] Nov 26 05:30:02 crc kubenswrapper[4871]: W1126 05:30:02.021347 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65ad1a09_cc57_45f2_9a13_2d83b8b8221c.slice/crio-788ecde705b36d77d86a86b73c718cfc5edc096d1e37e250c241b1d4a1026d57 WatchSource:0}: Error finding container 788ecde705b36d77d86a86b73c718cfc5edc096d1e37e250c241b1d4a1026d57: Status 404 returned error can't find the container with id 788ecde705b36d77d86a86b73c718cfc5edc096d1e37e250c241b1d4a1026d57 Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.230547 4871 generic.go:334] "Generic (PLEG): container finished" podID="40308fed-3d95-4693-9efd-e44e891eb454" containerID="ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28" exitCode=0 Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.230599 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xpxnm" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.230599 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpxnm" event={"ID":"40308fed-3d95-4693-9efd-e44e891eb454","Type":"ContainerDied","Data":"ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.230718 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xpxnm" event={"ID":"40308fed-3d95-4693-9efd-e44e891eb454","Type":"ContainerDied","Data":"0bd81754b16a5ae019424deb7df461f0816de13c9086c55283798278143d6ffe"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.230745 4871 scope.go:117] "RemoveContainer" containerID="ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.233726 4871 generic.go:334] "Generic (PLEG): container finished" podID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerID="f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5" exitCode=0 Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.233760 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vx568" event={"ID":"20dfe3fa-af2d-4906-bc8c-21d863489308","Type":"ContainerDied","Data":"f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.233982 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vx568" event={"ID":"20dfe3fa-af2d-4906-bc8c-21d863489308","Type":"ContainerDied","Data":"1cc594b1b535c24a4607fd9607547d02189c243aa739d09455c365925fbc5862"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.233781 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vx568" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.235905 4871 generic.go:334] "Generic (PLEG): container finished" podID="e028ebf2-4eb3-477a-be5d-ce02dd655d8d" containerID="cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521" exitCode=0 Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.235971 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" event={"ID":"e028ebf2-4eb3-477a-be5d-ce02dd655d8d","Type":"ContainerDied","Data":"cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.235988 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.235995 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ff9xx" event={"ID":"e028ebf2-4eb3-477a-be5d-ce02dd655d8d","Type":"ContainerDied","Data":"265eeea807e3528d8e00c9223297625c6daca57569ac5ab9ba5e47203a916332"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.239606 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" event={"ID":"65ad1a09-cc57-45f2-9a13-2d83b8b8221c","Type":"ContainerStarted","Data":"0bafc9f9bfd0d150338e298bea926cb02c244da107018860cf8a793e5871c5e4"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.239642 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" event={"ID":"65ad1a09-cc57-45f2-9a13-2d83b8b8221c","Type":"ContainerStarted","Data":"788ecde705b36d77d86a86b73c718cfc5edc096d1e37e250c241b1d4a1026d57"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.239863 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.240970 4871 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-qks66 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" start-of-body= Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.241005 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" podUID="65ad1a09-cc57-45f2-9a13-2d83b8b8221c" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.243140 4871 generic.go:334] "Generic (PLEG): container finished" podID="b042c85e-dbcc-488a-afca-602eadb2a09a" containerID="1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730" exitCode=0 Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.243199 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z482k" event={"ID":"b042c85e-dbcc-488a-afca-602eadb2a09a","Type":"ContainerDied","Data":"1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.243223 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z482k" event={"ID":"b042c85e-dbcc-488a-afca-602eadb2a09a","Type":"ContainerDied","Data":"b8ec6da5d418c74c5c2880b466e5f0d0619840a8bf9d289432576b5c4ba7f62a"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.243318 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z482k" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.249994 4871 generic.go:334] "Generic (PLEG): container finished" podID="13a2fbc4-3140-412d-b990-9398453dc21c" containerID="4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5" exitCode=0 Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.250233 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7n98c" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.250616 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7n98c" event={"ID":"13a2fbc4-3140-412d-b990-9398453dc21c","Type":"ContainerDied","Data":"4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.250643 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7n98c" event={"ID":"13a2fbc4-3140-412d-b990-9398453dc21c","Type":"ContainerDied","Data":"c3d799ffa824bf56c0e63ad5ccbd25f54015fa42553ef9cfde607580b6d61605"} Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.250891 4871 scope.go:117] "RemoveContainer" containerID="493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.275394 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xpxnm"] Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.280501 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xpxnm"] Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.280667 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" podStartSLOduration=1.280653381 podStartE2EDuration="1.280653381s" podCreationTimestamp="2025-11-26 05:30:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:30:02.278559504 +0000 UTC m=+260.461611090" watchObservedRunningTime="2025-11-26 05:30:02.280653381 +0000 UTC m=+260.463704967" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.292640 4871 scope.go:117] "RemoveContainer" containerID="b1a39ceaad8b9acaa64c3fd3394a1dba86959248b9d698be18b95f3f320f6a18" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.302554 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z482k"] Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.317620 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z482k"] Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.318001 4871 scope.go:117] "RemoveContainer" containerID="ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.321144 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7n98c"] Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.321712 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28\": container with ID starting with ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28 not found: ID does not exist" containerID="ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.321745 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28"} err="failed to get container status \"ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28\": rpc error: code = NotFound desc = could not find 
container \"ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28\": container with ID starting with ba7df382eeec02e487961dd99f328a7ce290e0e7269b8e4924e614f308617e28 not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.321769 4871 scope.go:117] "RemoveContainer" containerID="493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb" Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.322173 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb\": container with ID starting with 493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb not found: ID does not exist" containerID="493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.322204 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb"} err="failed to get container status \"493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb\": rpc error: code = NotFound desc = could not find container \"493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb\": container with ID starting with 493405ab0c343a579ba9dbee9856bce9592751cee8082fdf3930e8fb542d8abb not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.322222 4871 scope.go:117] "RemoveContainer" containerID="b1a39ceaad8b9acaa64c3fd3394a1dba86959248b9d698be18b95f3f320f6a18" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.324361 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7n98c"] Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.328691 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vx568"] Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.333629 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1a39ceaad8b9acaa64c3fd3394a1dba86959248b9d698be18b95f3f320f6a18\": container with ID starting with b1a39ceaad8b9acaa64c3fd3394a1dba86959248b9d698be18b95f3f320f6a18 not found: ID does not exist" containerID="b1a39ceaad8b9acaa64c3fd3394a1dba86959248b9d698be18b95f3f320f6a18" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.333686 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1a39ceaad8b9acaa64c3fd3394a1dba86959248b9d698be18b95f3f320f6a18"} err="failed to get container status \"b1a39ceaad8b9acaa64c3fd3394a1dba86959248b9d698be18b95f3f320f6a18\": rpc error: code = NotFound desc = could not find container \"b1a39ceaad8b9acaa64c3fd3394a1dba86959248b9d698be18b95f3f320f6a18\": container with ID starting with b1a39ceaad8b9acaa64c3fd3394a1dba86959248b9d698be18b95f3f320f6a18 not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.333720 4871 scope.go:117] "RemoveContainer" containerID="f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.337802 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vx568"] Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.349482 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/marketplace-operator-79b997595-ff9xx"] Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.358094 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ff9xx"] Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.385692 4871 scope.go:117] "RemoveContainer" containerID="c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.406836 4871 scope.go:117] "RemoveContainer" containerID="19920aefe3e724ed0095c17809e56a69f82e308ecc2def3813d6cb2acf3c2033" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.488177 4871 scope.go:117] "RemoveContainer" containerID="f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5" Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.488922 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5\": container with ID starting with f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5 not found: ID does not exist" containerID="f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.488964 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5"} err="failed to get container status \"f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5\": rpc error: code = NotFound desc = could not find container \"f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5\": container with ID starting with f102650fe912485f8cfd47d1c8523f7c5e26f3dd1462dfea41272d17172cb1a5 not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.488982 4871 scope.go:117] "RemoveContainer" containerID="c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8" Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.489451 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8\": container with ID starting with c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8 not found: ID does not exist" containerID="c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.489493 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8"} err="failed to get container status \"c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8\": rpc error: code = NotFound desc = could not find container \"c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8\": container with ID starting with c0940145b0104827f04c6547d4ae8675502ce7ac81fb8530c8f8644adf10c2e8 not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.489542 4871 scope.go:117] "RemoveContainer" containerID="19920aefe3e724ed0095c17809e56a69f82e308ecc2def3813d6cb2acf3c2033" Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.490293 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19920aefe3e724ed0095c17809e56a69f82e308ecc2def3813d6cb2acf3c2033\": container with ID starting with 
19920aefe3e724ed0095c17809e56a69f82e308ecc2def3813d6cb2acf3c2033 not found: ID does not exist" containerID="19920aefe3e724ed0095c17809e56a69f82e308ecc2def3813d6cb2acf3c2033" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.490318 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19920aefe3e724ed0095c17809e56a69f82e308ecc2def3813d6cb2acf3c2033"} err="failed to get container status \"19920aefe3e724ed0095c17809e56a69f82e308ecc2def3813d6cb2acf3c2033\": rpc error: code = NotFound desc = could not find container \"19920aefe3e724ed0095c17809e56a69f82e308ecc2def3813d6cb2acf3c2033\": container with ID starting with 19920aefe3e724ed0095c17809e56a69f82e308ecc2def3813d6cb2acf3c2033 not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.490333 4871 scope.go:117] "RemoveContainer" containerID="cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.515000 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13a2fbc4-3140-412d-b990-9398453dc21c" path="/var/lib/kubelet/pods/13a2fbc4-3140-412d-b990-9398453dc21c/volumes" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.515765 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" path="/var/lib/kubelet/pods/20dfe3fa-af2d-4906-bc8c-21d863489308/volumes" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.516471 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40308fed-3d95-4693-9efd-e44e891eb454" path="/var/lib/kubelet/pods/40308fed-3d95-4693-9efd-e44e891eb454/volumes" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.517725 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b042c85e-dbcc-488a-afca-602eadb2a09a" path="/var/lib/kubelet/pods/b042c85e-dbcc-488a-afca-602eadb2a09a/volumes" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.518423 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e028ebf2-4eb3-477a-be5d-ce02dd655d8d" path="/var/lib/kubelet/pods/e028ebf2-4eb3-477a-be5d-ce02dd655d8d/volumes" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.530289 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.538450 4871 scope.go:117] "RemoveContainer" containerID="cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521" Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.538901 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521\": container with ID starting with cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521 not found: ID does not exist" containerID="cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.538936 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521"} err="failed to get container status \"cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521\": rpc error: code = NotFound desc = could not find container \"cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521\": container with ID starting with cab69f2facebd2679eadd70d1a6b8effc3f78f8a91d1f167a038c2ff418a6521 not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.538960 4871 scope.go:117] "RemoveContainer" containerID="1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.558885 4871 scope.go:117] "RemoveContainer" containerID="ed3921840d56d1925627b97aabc59f2724d886b831ea77c3e319793bd1c6fb8a" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.580235 4871 scope.go:117] "RemoveContainer" containerID="a36d4fcc698e9c7b2654edaffe605b5b47cd96fc18a45d0907f633a6cda30879" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.596624 4871 scope.go:117] "RemoveContainer" containerID="1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730" Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.597674 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730\": container with ID starting with 1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730 not found: ID does not exist" containerID="1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.597718 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730"} err="failed to get container status \"1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730\": rpc error: code = NotFound desc = could not find container \"1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730\": container with ID starting with 1e36799ecca5371d19e6855a4b72bba207788b18f07f925614f527fa974a6730 not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.597751 4871 scope.go:117] "RemoveContainer" containerID="ed3921840d56d1925627b97aabc59f2724d886b831ea77c3e319793bd1c6fb8a" Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.598210 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed3921840d56d1925627b97aabc59f2724d886b831ea77c3e319793bd1c6fb8a\": 
container with ID starting with ed3921840d56d1925627b97aabc59f2724d886b831ea77c3e319793bd1c6fb8a not found: ID does not exist" containerID="ed3921840d56d1925627b97aabc59f2724d886b831ea77c3e319793bd1c6fb8a" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.598236 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed3921840d56d1925627b97aabc59f2724d886b831ea77c3e319793bd1c6fb8a"} err="failed to get container status \"ed3921840d56d1925627b97aabc59f2724d886b831ea77c3e319793bd1c6fb8a\": rpc error: code = NotFound desc = could not find container \"ed3921840d56d1925627b97aabc59f2724d886b831ea77c3e319793bd1c6fb8a\": container with ID starting with ed3921840d56d1925627b97aabc59f2724d886b831ea77c3e319793bd1c6fb8a not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.598251 4871 scope.go:117] "RemoveContainer" containerID="a36d4fcc698e9c7b2654edaffe605b5b47cd96fc18a45d0907f633a6cda30879" Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.598744 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a36d4fcc698e9c7b2654edaffe605b5b47cd96fc18a45d0907f633a6cda30879\": container with ID starting with a36d4fcc698e9c7b2654edaffe605b5b47cd96fc18a45d0907f633a6cda30879 not found: ID does not exist" containerID="a36d4fcc698e9c7b2654edaffe605b5b47cd96fc18a45d0907f633a6cda30879" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.598781 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a36d4fcc698e9c7b2654edaffe605b5b47cd96fc18a45d0907f633a6cda30879"} err="failed to get container status \"a36d4fcc698e9c7b2654edaffe605b5b47cd96fc18a45d0907f633a6cda30879\": rpc error: code = NotFound desc = could not find container \"a36d4fcc698e9c7b2654edaffe605b5b47cd96fc18a45d0907f633a6cda30879\": container with ID starting with a36d4fcc698e9c7b2654edaffe605b5b47cd96fc18a45d0907f633a6cda30879 not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.598811 4871 scope.go:117] "RemoveContainer" containerID="4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.615328 4871 scope.go:117] "RemoveContainer" containerID="feea5d8d7bf7aac13b172362a82a64dafac1eb2f39d6a5add028ef2ca7297ec3" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.628204 4871 scope.go:117] "RemoveContainer" containerID="1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.640611 4871 scope.go:117] "RemoveContainer" containerID="4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5" Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.640939 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5\": container with ID starting with 4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5 not found: ID does not exist" containerID="4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.640966 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5"} err="failed to get container status \"4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5\": 
rpc error: code = NotFound desc = could not find container \"4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5\": container with ID starting with 4d4297a46ca0d290a9f22029d39d71a9985af8461fff2710af5acd1263b39bd5 not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.640988 4871 scope.go:117] "RemoveContainer" containerID="feea5d8d7bf7aac13b172362a82a64dafac1eb2f39d6a5add028ef2ca7297ec3" Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.641253 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"feea5d8d7bf7aac13b172362a82a64dafac1eb2f39d6a5add028ef2ca7297ec3\": container with ID starting with feea5d8d7bf7aac13b172362a82a64dafac1eb2f39d6a5add028ef2ca7297ec3 not found: ID does not exist" containerID="feea5d8d7bf7aac13b172362a82a64dafac1eb2f39d6a5add028ef2ca7297ec3" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.641273 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"feea5d8d7bf7aac13b172362a82a64dafac1eb2f39d6a5add028ef2ca7297ec3"} err="failed to get container status \"feea5d8d7bf7aac13b172362a82a64dafac1eb2f39d6a5add028ef2ca7297ec3\": rpc error: code = NotFound desc = could not find container \"feea5d8d7bf7aac13b172362a82a64dafac1eb2f39d6a5add028ef2ca7297ec3\": container with ID starting with feea5d8d7bf7aac13b172362a82a64dafac1eb2f39d6a5add028ef2ca7297ec3 not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.641290 4871 scope.go:117] "RemoveContainer" containerID="1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1" Nov 26 05:30:02 crc kubenswrapper[4871]: E1126 05:30:02.641670 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1\": container with ID starting with 1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1 not found: ID does not exist" containerID="1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.641690 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1"} err="failed to get container status \"1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1\": rpc error: code = NotFound desc = could not find container \"1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1\": container with ID starting with 1c544dca21fa3870ebcca76311a6c1603554e3298ccca45e5d6127178eea8ed1 not found: ID does not exist" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.645930 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-secret-volume\") pod \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.645991 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-config-volume\") pod \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.646038 4871 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-tjkl9\" (UniqueName: \"kubernetes.io/projected/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-kube-api-access-tjkl9\") pod \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\" (UID: \"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6\") " Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.647886 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-config-volume" (OuterVolumeSpecName: "config-volume") pod "ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6" (UID: "ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.652238 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6" (UID: "ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.652335 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-kube-api-access-tjkl9" (OuterVolumeSpecName: "kube-api-access-tjkl9") pod "ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6" (UID: "ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6"). InnerVolumeSpecName "kube-api-access-tjkl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.746949 4871 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.746982 4871 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:02 crc kubenswrapper[4871]: I1126 05:30:02.746991 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjkl9\" (UniqueName: \"kubernetes.io/projected/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6-kube-api-access-tjkl9\") on node \"crc\" DevicePath \"\"" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.258543 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" event={"ID":"ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6","Type":"ContainerDied","Data":"c330bf46468ac36588feae9273b03973090655d5fe96bfaebacd9f3516529741"} Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.258578 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c330bf46468ac36588feae9273b03973090655d5fe96bfaebacd9f3516529741" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.258594 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.265351 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346312 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4qdhh"] Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346568 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13a2fbc4-3140-412d-b990-9398453dc21c" containerName="registry-server" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346585 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="13a2fbc4-3140-412d-b990-9398453dc21c" containerName="registry-server" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346594 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b042c85e-dbcc-488a-afca-602eadb2a09a" containerName="extract-content" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346601 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b042c85e-dbcc-488a-afca-602eadb2a09a" containerName="extract-content" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346611 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b042c85e-dbcc-488a-afca-602eadb2a09a" containerName="extract-utilities" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346617 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b042c85e-dbcc-488a-afca-602eadb2a09a" containerName="extract-utilities" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346625 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerName="extract-content" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346630 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerName="extract-content" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346638 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40308fed-3d95-4693-9efd-e44e891eb454" containerName="extract-content" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346644 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="40308fed-3d95-4693-9efd-e44e891eb454" containerName="extract-content" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346650 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13a2fbc4-3140-412d-b990-9398453dc21c" containerName="extract-content" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346656 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="13a2fbc4-3140-412d-b990-9398453dc21c" containerName="extract-content" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346665 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e028ebf2-4eb3-477a-be5d-ce02dd655d8d" containerName="marketplace-operator" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346671 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="e028ebf2-4eb3-477a-be5d-ce02dd655d8d" containerName="marketplace-operator" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346681 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13a2fbc4-3140-412d-b990-9398453dc21c" containerName="extract-utilities" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346688 4871 
state_mem.go:107] "Deleted CPUSet assignment" podUID="13a2fbc4-3140-412d-b990-9398453dc21c" containerName="extract-utilities" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346694 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40308fed-3d95-4693-9efd-e44e891eb454" containerName="extract-utilities" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346700 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="40308fed-3d95-4693-9efd-e44e891eb454" containerName="extract-utilities" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346707 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6" containerName="collect-profiles" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346712 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6" containerName="collect-profiles" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346719 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b042c85e-dbcc-488a-afca-602eadb2a09a" containerName="registry-server" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346724 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b042c85e-dbcc-488a-afca-602eadb2a09a" containerName="registry-server" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346733 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerName="extract-utilities" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346738 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerName="extract-utilities" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346747 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40308fed-3d95-4693-9efd-e44e891eb454" containerName="registry-server" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346752 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="40308fed-3d95-4693-9efd-e44e891eb454" containerName="registry-server" Nov 26 05:30:03 crc kubenswrapper[4871]: E1126 05:30:03.346761 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerName="registry-server" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346766 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerName="registry-server" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346854 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="13a2fbc4-3140-412d-b990-9398453dc21c" containerName="registry-server" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346874 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6" containerName="collect-profiles" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346883 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="20dfe3fa-af2d-4906-bc8c-21d863489308" containerName="registry-server" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346889 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="40308fed-3d95-4693-9efd-e44e891eb454" containerName="registry-server" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.346899 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="b042c85e-dbcc-488a-afca-602eadb2a09a" containerName="registry-server" Nov 26 05:30:03 crc 
kubenswrapper[4871]: I1126 05:30:03.346906 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="e028ebf2-4eb3-477a-be5d-ce02dd655d8d" containerName="marketplace-operator" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.347610 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.353950 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.358646 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4qdhh"] Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.454905 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqspk\" (UniqueName: \"kubernetes.io/projected/4151ee9a-4d65-4438-bf55-d437df2482d8-kube-api-access-bqspk\") pod \"certified-operators-4qdhh\" (UID: \"4151ee9a-4d65-4438-bf55-d437df2482d8\") " pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.454959 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4151ee9a-4d65-4438-bf55-d437df2482d8-utilities\") pod \"certified-operators-4qdhh\" (UID: \"4151ee9a-4d65-4438-bf55-d437df2482d8\") " pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.455012 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4151ee9a-4d65-4438-bf55-d437df2482d8-catalog-content\") pod \"certified-operators-4qdhh\" (UID: \"4151ee9a-4d65-4438-bf55-d437df2482d8\") " pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.544881 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fqzs2"] Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.546084 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.549907 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.561283 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqspk\" (UniqueName: \"kubernetes.io/projected/4151ee9a-4d65-4438-bf55-d437df2482d8-kube-api-access-bqspk\") pod \"certified-operators-4qdhh\" (UID: \"4151ee9a-4d65-4438-bf55-d437df2482d8\") " pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.561338 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4151ee9a-4d65-4438-bf55-d437df2482d8-utilities\") pod \"certified-operators-4qdhh\" (UID: \"4151ee9a-4d65-4438-bf55-d437df2482d8\") " pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.561385 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4151ee9a-4d65-4438-bf55-d437df2482d8-catalog-content\") pod \"certified-operators-4qdhh\" (UID: \"4151ee9a-4d65-4438-bf55-d437df2482d8\") " pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.561868 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4151ee9a-4d65-4438-bf55-d437df2482d8-catalog-content\") pod \"certified-operators-4qdhh\" (UID: \"4151ee9a-4d65-4438-bf55-d437df2482d8\") " pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.563744 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqzs2"] Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.564334 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4151ee9a-4d65-4438-bf55-d437df2482d8-utilities\") pod \"certified-operators-4qdhh\" (UID: \"4151ee9a-4d65-4438-bf55-d437df2482d8\") " pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.584475 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqspk\" (UniqueName: \"kubernetes.io/projected/4151ee9a-4d65-4438-bf55-d437df2482d8-kube-api-access-bqspk\") pod \"certified-operators-4qdhh\" (UID: \"4151ee9a-4d65-4438-bf55-d437df2482d8\") " pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.662377 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9879edf7-a11e-49fa-a1ad-b8057cc59072-utilities\") pod \"redhat-marketplace-fqzs2\" (UID: \"9879edf7-a11e-49fa-a1ad-b8057cc59072\") " pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.662420 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9879edf7-a11e-49fa-a1ad-b8057cc59072-catalog-content\") pod \"redhat-marketplace-fqzs2\" (UID: 
\"9879edf7-a11e-49fa-a1ad-b8057cc59072\") " pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.662477 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zm7rn\" (UniqueName: \"kubernetes.io/projected/9879edf7-a11e-49fa-a1ad-b8057cc59072-kube-api-access-zm7rn\") pod \"redhat-marketplace-fqzs2\" (UID: \"9879edf7-a11e-49fa-a1ad-b8057cc59072\") " pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.667195 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.766079 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zm7rn\" (UniqueName: \"kubernetes.io/projected/9879edf7-a11e-49fa-a1ad-b8057cc59072-kube-api-access-zm7rn\") pod \"redhat-marketplace-fqzs2\" (UID: \"9879edf7-a11e-49fa-a1ad-b8057cc59072\") " pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.766140 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9879edf7-a11e-49fa-a1ad-b8057cc59072-utilities\") pod \"redhat-marketplace-fqzs2\" (UID: \"9879edf7-a11e-49fa-a1ad-b8057cc59072\") " pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.766167 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9879edf7-a11e-49fa-a1ad-b8057cc59072-catalog-content\") pod \"redhat-marketplace-fqzs2\" (UID: \"9879edf7-a11e-49fa-a1ad-b8057cc59072\") " pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.767074 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9879edf7-a11e-49fa-a1ad-b8057cc59072-catalog-content\") pod \"redhat-marketplace-fqzs2\" (UID: \"9879edf7-a11e-49fa-a1ad-b8057cc59072\") " pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.767091 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9879edf7-a11e-49fa-a1ad-b8057cc59072-utilities\") pod \"redhat-marketplace-fqzs2\" (UID: \"9879edf7-a11e-49fa-a1ad-b8057cc59072\") " pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.786981 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zm7rn\" (UniqueName: \"kubernetes.io/projected/9879edf7-a11e-49fa-a1ad-b8057cc59072-kube-api-access-zm7rn\") pod \"redhat-marketplace-fqzs2\" (UID: \"9879edf7-a11e-49fa-a1ad-b8057cc59072\") " pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.878487 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4qdhh"] Nov 26 05:30:03 crc kubenswrapper[4871]: I1126 05:30:03.880996 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:03 crc kubenswrapper[4871]: W1126 05:30:03.885094 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4151ee9a_4d65_4438_bf55_d437df2482d8.slice/crio-d80656c6e893326c805a61222f9a7025d19b2c8dfc8cbb0ef8e78efdfb822bf3 WatchSource:0}: Error finding container d80656c6e893326c805a61222f9a7025d19b2c8dfc8cbb0ef8e78efdfb822bf3: Status 404 returned error can't find the container with id d80656c6e893326c805a61222f9a7025d19b2c8dfc8cbb0ef8e78efdfb822bf3 Nov 26 05:30:04 crc kubenswrapper[4871]: I1126 05:30:04.052361 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fqzs2"] Nov 26 05:30:04 crc kubenswrapper[4871]: W1126 05:30:04.080397 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9879edf7_a11e_49fa_a1ad_b8057cc59072.slice/crio-597b865492b296841c249959914d121d79b203102d5db8a8660968dc42a4f31a WatchSource:0}: Error finding container 597b865492b296841c249959914d121d79b203102d5db8a8660968dc42a4f31a: Status 404 returned error can't find the container with id 597b865492b296841c249959914d121d79b203102d5db8a8660968dc42a4f31a Nov 26 05:30:04 crc kubenswrapper[4871]: I1126 05:30:04.269272 4871 generic.go:334] "Generic (PLEG): container finished" podID="9879edf7-a11e-49fa-a1ad-b8057cc59072" containerID="792e424e1e1e661c75e04a8044af6478ffe921e7ce142bda516a1989c128574b" exitCode=0 Nov 26 05:30:04 crc kubenswrapper[4871]: I1126 05:30:04.269381 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqzs2" event={"ID":"9879edf7-a11e-49fa-a1ad-b8057cc59072","Type":"ContainerDied","Data":"792e424e1e1e661c75e04a8044af6478ffe921e7ce142bda516a1989c128574b"} Nov 26 05:30:04 crc kubenswrapper[4871]: I1126 05:30:04.270947 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqzs2" event={"ID":"9879edf7-a11e-49fa-a1ad-b8057cc59072","Type":"ContainerStarted","Data":"597b865492b296841c249959914d121d79b203102d5db8a8660968dc42a4f31a"} Nov 26 05:30:04 crc kubenswrapper[4871]: I1126 05:30:04.272772 4871 generic.go:334] "Generic (PLEG): container finished" podID="4151ee9a-4d65-4438-bf55-d437df2482d8" containerID="4b8471853daac8238f0efaf6564235c538d0aac7a4fca1f547d0a5e025518ff9" exitCode=0 Nov 26 05:30:04 crc kubenswrapper[4871]: I1126 05:30:04.273331 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4qdhh" event={"ID":"4151ee9a-4d65-4438-bf55-d437df2482d8","Type":"ContainerDied","Data":"4b8471853daac8238f0efaf6564235c538d0aac7a4fca1f547d0a5e025518ff9"} Nov 26 05:30:04 crc kubenswrapper[4871]: I1126 05:30:04.273400 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4qdhh" event={"ID":"4151ee9a-4d65-4438-bf55-d437df2482d8","Type":"ContainerStarted","Data":"d80656c6e893326c805a61222f9a7025d19b2c8dfc8cbb0ef8e78efdfb822bf3"} Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.279300 4871 generic.go:334] "Generic (PLEG): container finished" podID="4151ee9a-4d65-4438-bf55-d437df2482d8" containerID="7da5fdf880b50a31046baa57a6d12209eb0d496d0fdfa1380bffcb8725a546d9" exitCode=0 Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.279359 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-4qdhh" event={"ID":"4151ee9a-4d65-4438-bf55-d437df2482d8","Type":"ContainerDied","Data":"7da5fdf880b50a31046baa57a6d12209eb0d496d0fdfa1380bffcb8725a546d9"} Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.283576 4871 generic.go:334] "Generic (PLEG): container finished" podID="9879edf7-a11e-49fa-a1ad-b8057cc59072" containerID="36ea7919d9e7f98fb15f7a1a959d537570383aa917bee9749af61963f00b9520" exitCode=0 Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.283640 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqzs2" event={"ID":"9879edf7-a11e-49fa-a1ad-b8057cc59072","Type":"ContainerDied","Data":"36ea7919d9e7f98fb15f7a1a959d537570383aa917bee9749af61963f00b9520"} Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.747926 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xb2lw"] Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.749240 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.753650 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.755610 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xb2lw"] Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.892862 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5113372b-1125-4d32-8af6-160defd5579a-catalog-content\") pod \"redhat-operators-xb2lw\" (UID: \"5113372b-1125-4d32-8af6-160defd5579a\") " pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.892903 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frwsj\" (UniqueName: \"kubernetes.io/projected/5113372b-1125-4d32-8af6-160defd5579a-kube-api-access-frwsj\") pod \"redhat-operators-xb2lw\" (UID: \"5113372b-1125-4d32-8af6-160defd5579a\") " pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.892945 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5113372b-1125-4d32-8af6-160defd5579a-utilities\") pod \"redhat-operators-xb2lw\" (UID: \"5113372b-1125-4d32-8af6-160defd5579a\") " pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.941462 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2wpn8"] Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.942628 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.945080 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.953976 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2wpn8"] Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.993870 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5113372b-1125-4d32-8af6-160defd5579a-catalog-content\") pod \"redhat-operators-xb2lw\" (UID: \"5113372b-1125-4d32-8af6-160defd5579a\") " pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.993909 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frwsj\" (UniqueName: \"kubernetes.io/projected/5113372b-1125-4d32-8af6-160defd5579a-kube-api-access-frwsj\") pod \"redhat-operators-xb2lw\" (UID: \"5113372b-1125-4d32-8af6-160defd5579a\") " pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.993956 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5113372b-1125-4d32-8af6-160defd5579a-utilities\") pod \"redhat-operators-xb2lw\" (UID: \"5113372b-1125-4d32-8af6-160defd5579a\") " pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.994646 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5113372b-1125-4d32-8af6-160defd5579a-utilities\") pod \"redhat-operators-xb2lw\" (UID: \"5113372b-1125-4d32-8af6-160defd5579a\") " pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:05 crc kubenswrapper[4871]: I1126 05:30:05.994837 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5113372b-1125-4d32-8af6-160defd5579a-catalog-content\") pod \"redhat-operators-xb2lw\" (UID: \"5113372b-1125-4d32-8af6-160defd5579a\") " pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.028565 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frwsj\" (UniqueName: \"kubernetes.io/projected/5113372b-1125-4d32-8af6-160defd5579a-kube-api-access-frwsj\") pod \"redhat-operators-xb2lw\" (UID: \"5113372b-1125-4d32-8af6-160defd5579a\") " pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.095537 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcpz5\" (UniqueName: \"kubernetes.io/projected/e6301213-5be0-4241-ba6d-01e1cfc78b78-kube-api-access-bcpz5\") pod \"community-operators-2wpn8\" (UID: \"e6301213-5be0-4241-ba6d-01e1cfc78b78\") " pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.095633 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6301213-5be0-4241-ba6d-01e1cfc78b78-catalog-content\") pod \"community-operators-2wpn8\" (UID: \"e6301213-5be0-4241-ba6d-01e1cfc78b78\") 
" pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.095682 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6301213-5be0-4241-ba6d-01e1cfc78b78-utilities\") pod \"community-operators-2wpn8\" (UID: \"e6301213-5be0-4241-ba6d-01e1cfc78b78\") " pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.161535 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.197252 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6301213-5be0-4241-ba6d-01e1cfc78b78-catalog-content\") pod \"community-operators-2wpn8\" (UID: \"e6301213-5be0-4241-ba6d-01e1cfc78b78\") " pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.197309 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6301213-5be0-4241-ba6d-01e1cfc78b78-utilities\") pod \"community-operators-2wpn8\" (UID: \"e6301213-5be0-4241-ba6d-01e1cfc78b78\") " pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.197331 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcpz5\" (UniqueName: \"kubernetes.io/projected/e6301213-5be0-4241-ba6d-01e1cfc78b78-kube-api-access-bcpz5\") pod \"community-operators-2wpn8\" (UID: \"e6301213-5be0-4241-ba6d-01e1cfc78b78\") " pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.197887 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e6301213-5be0-4241-ba6d-01e1cfc78b78-catalog-content\") pod \"community-operators-2wpn8\" (UID: \"e6301213-5be0-4241-ba6d-01e1cfc78b78\") " pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.197910 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e6301213-5be0-4241-ba6d-01e1cfc78b78-utilities\") pod \"community-operators-2wpn8\" (UID: \"e6301213-5be0-4241-ba6d-01e1cfc78b78\") " pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.213105 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcpz5\" (UniqueName: \"kubernetes.io/projected/e6301213-5be0-4241-ba6d-01e1cfc78b78-kube-api-access-bcpz5\") pod \"community-operators-2wpn8\" (UID: \"e6301213-5be0-4241-ba6d-01e1cfc78b78\") " pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.256941 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.290295 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4qdhh" event={"ID":"4151ee9a-4d65-4438-bf55-d437df2482d8","Type":"ContainerStarted","Data":"53711b2c704481621b5ab2c85c4993d3792ac2fab49134db485f98cfb6cc8af3"} Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.293329 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fqzs2" event={"ID":"9879edf7-a11e-49fa-a1ad-b8057cc59072","Type":"ContainerStarted","Data":"2477c61d9d65346a8e63efe5a35145a00630f07553fdc12f7df1c6e6e9a4b835"} Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.313911 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4qdhh" podStartSLOduration=1.8745998529999999 podStartE2EDuration="3.313890727s" podCreationTimestamp="2025-11-26 05:30:03 +0000 UTC" firstStartedPulling="2025-11-26 05:30:04.276971365 +0000 UTC m=+262.460022961" lastFinishedPulling="2025-11-26 05:30:05.716262239 +0000 UTC m=+263.899313835" observedRunningTime="2025-11-26 05:30:06.310086391 +0000 UTC m=+264.493137977" watchObservedRunningTime="2025-11-26 05:30:06.313890727 +0000 UTC m=+264.496942313" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.326783 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fqzs2" podStartSLOduration=1.80492485 podStartE2EDuration="3.326764565s" podCreationTimestamp="2025-11-26 05:30:03 +0000 UTC" firstStartedPulling="2025-11-26 05:30:04.270612683 +0000 UTC m=+262.453664269" lastFinishedPulling="2025-11-26 05:30:05.792452378 +0000 UTC m=+263.975503984" observedRunningTime="2025-11-26 05:30:06.324270141 +0000 UTC m=+264.507321737" watchObservedRunningTime="2025-11-26 05:30:06.326764565 +0000 UTC m=+264.509816151" Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.367313 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xb2lw"] Nov 26 05:30:06 crc kubenswrapper[4871]: W1126 05:30:06.384541 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5113372b_1125_4d32_8af6_160defd5579a.slice/crio-0f29e20c798af5fb0426c2392ec017930c6eafe57b697e992284ee4a95f6c2f7 WatchSource:0}: Error finding container 0f29e20c798af5fb0426c2392ec017930c6eafe57b697e992284ee4a95f6c2f7: Status 404 returned error can't find the container with id 0f29e20c798af5fb0426c2392ec017930c6eafe57b697e992284ee4a95f6c2f7 Nov 26 05:30:06 crc kubenswrapper[4871]: I1126 05:30:06.490157 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2wpn8"] Nov 26 05:30:06 crc kubenswrapper[4871]: W1126 05:30:06.502977 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode6301213_5be0_4241_ba6d_01e1cfc78b78.slice/crio-853819766f7901d5977c4679d46382039b0c79200e6af58dc2c28b105175a1c7 WatchSource:0}: Error finding container 853819766f7901d5977c4679d46382039b0c79200e6af58dc2c28b105175a1c7: Status 404 returned error can't find the container with id 853819766f7901d5977c4679d46382039b0c79200e6af58dc2c28b105175a1c7 Nov 26 05:30:07 crc kubenswrapper[4871]: I1126 05:30:07.299006 4871 generic.go:334] "Generic (PLEG): container finished" 
podID="5113372b-1125-4d32-8af6-160defd5579a" containerID="fcf65908d32672ab1a7003dbb36e2bd549d9ad6f92d569ca8e5504470863a31c" exitCode=0 Nov 26 05:30:07 crc kubenswrapper[4871]: I1126 05:30:07.299045 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xb2lw" event={"ID":"5113372b-1125-4d32-8af6-160defd5579a","Type":"ContainerDied","Data":"fcf65908d32672ab1a7003dbb36e2bd549d9ad6f92d569ca8e5504470863a31c"} Nov 26 05:30:07 crc kubenswrapper[4871]: I1126 05:30:07.299351 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xb2lw" event={"ID":"5113372b-1125-4d32-8af6-160defd5579a","Type":"ContainerStarted","Data":"0f29e20c798af5fb0426c2392ec017930c6eafe57b697e992284ee4a95f6c2f7"} Nov 26 05:30:07 crc kubenswrapper[4871]: I1126 05:30:07.300705 4871 generic.go:334] "Generic (PLEG): container finished" podID="e6301213-5be0-4241-ba6d-01e1cfc78b78" containerID="ba8a10d23df1d4cd59f3a51f88ec644f05a28de9c5d50518bb27b3bd0e1bf5cd" exitCode=0 Nov 26 05:30:07 crc kubenswrapper[4871]: I1126 05:30:07.300729 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2wpn8" event={"ID":"e6301213-5be0-4241-ba6d-01e1cfc78b78","Type":"ContainerDied","Data":"ba8a10d23df1d4cd59f3a51f88ec644f05a28de9c5d50518bb27b3bd0e1bf5cd"} Nov 26 05:30:07 crc kubenswrapper[4871]: I1126 05:30:07.300756 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2wpn8" event={"ID":"e6301213-5be0-4241-ba6d-01e1cfc78b78","Type":"ContainerStarted","Data":"853819766f7901d5977c4679d46382039b0c79200e6af58dc2c28b105175a1c7"} Nov 26 05:30:08 crc kubenswrapper[4871]: I1126 05:30:08.308173 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xb2lw" event={"ID":"5113372b-1125-4d32-8af6-160defd5579a","Type":"ContainerStarted","Data":"6f4f8924ba30b4782121d3ca783b6cf502d9097780c5fdd34c3399d82744aa10"} Nov 26 05:30:08 crc kubenswrapper[4871]: I1126 05:30:08.309733 4871 generic.go:334] "Generic (PLEG): container finished" podID="e6301213-5be0-4241-ba6d-01e1cfc78b78" containerID="aae0ea7c0de1a0f720aaf24b2389792938c856a8ffd08ca8c80438cee3a0ef27" exitCode=0 Nov 26 05:30:08 crc kubenswrapper[4871]: I1126 05:30:08.309756 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2wpn8" event={"ID":"e6301213-5be0-4241-ba6d-01e1cfc78b78","Type":"ContainerDied","Data":"aae0ea7c0de1a0f720aaf24b2389792938c856a8ffd08ca8c80438cee3a0ef27"} Nov 26 05:30:09 crc kubenswrapper[4871]: I1126 05:30:09.322236 4871 generic.go:334] "Generic (PLEG): container finished" podID="5113372b-1125-4d32-8af6-160defd5579a" containerID="6f4f8924ba30b4782121d3ca783b6cf502d9097780c5fdd34c3399d82744aa10" exitCode=0 Nov 26 05:30:09 crc kubenswrapper[4871]: I1126 05:30:09.322432 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xb2lw" event={"ID":"5113372b-1125-4d32-8af6-160defd5579a","Type":"ContainerDied","Data":"6f4f8924ba30b4782121d3ca783b6cf502d9097780c5fdd34c3399d82744aa10"} Nov 26 05:30:09 crc kubenswrapper[4871]: I1126 05:30:09.329093 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2wpn8" event={"ID":"e6301213-5be0-4241-ba6d-01e1cfc78b78","Type":"ContainerStarted","Data":"33a5cd06a9b8911f88cabbe12906a18d298f4c7bb496d5a5c0889c4532405f6e"} Nov 26 05:30:09 crc kubenswrapper[4871]: I1126 05:30:09.366663 4871 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2wpn8" podStartSLOduration=2.943130336 podStartE2EDuration="4.36664316s" podCreationTimestamp="2025-11-26 05:30:05 +0000 UTC" firstStartedPulling="2025-11-26 05:30:07.302337291 +0000 UTC m=+265.485388877" lastFinishedPulling="2025-11-26 05:30:08.725850115 +0000 UTC m=+266.908901701" observedRunningTime="2025-11-26 05:30:09.364157467 +0000 UTC m=+267.547209053" watchObservedRunningTime="2025-11-26 05:30:09.36664316 +0000 UTC m=+267.549694746" Nov 26 05:30:11 crc kubenswrapper[4871]: I1126 05:30:11.342184 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xb2lw" event={"ID":"5113372b-1125-4d32-8af6-160defd5579a","Type":"ContainerStarted","Data":"d7aea9208381be78e80938dcf266654b434e8a113082db281def5bd4791b81ed"} Nov 26 05:30:11 crc kubenswrapper[4871]: I1126 05:30:11.364243 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xb2lw" podStartSLOduration=3.953356694 podStartE2EDuration="6.364229173s" podCreationTimestamp="2025-11-26 05:30:05 +0000 UTC" firstStartedPulling="2025-11-26 05:30:07.30036895 +0000 UTC m=+265.483420536" lastFinishedPulling="2025-11-26 05:30:09.711241429 +0000 UTC m=+267.894293015" observedRunningTime="2025-11-26 05:30:11.361615407 +0000 UTC m=+269.544666983" watchObservedRunningTime="2025-11-26 05:30:11.364229173 +0000 UTC m=+269.547280759" Nov 26 05:30:13 crc kubenswrapper[4871]: I1126 05:30:13.668012 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:13 crc kubenswrapper[4871]: I1126 05:30:13.668291 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:13 crc kubenswrapper[4871]: I1126 05:30:13.712542 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:13 crc kubenswrapper[4871]: I1126 05:30:13.882092 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:13 crc kubenswrapper[4871]: I1126 05:30:13.882301 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:13 crc kubenswrapper[4871]: I1126 05:30:13.925816 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:14 crc kubenswrapper[4871]: I1126 05:30:14.403199 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fqzs2" Nov 26 05:30:14 crc kubenswrapper[4871]: I1126 05:30:14.410814 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4qdhh" Nov 26 05:30:16 crc kubenswrapper[4871]: I1126 05:30:16.162557 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:16 crc kubenswrapper[4871]: I1126 05:30:16.162846 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:16 crc kubenswrapper[4871]: I1126 05:30:16.209944 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:16 crc kubenswrapper[4871]: I1126 05:30:16.258637 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:16 crc kubenswrapper[4871]: I1126 05:30:16.258716 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:16 crc kubenswrapper[4871]: I1126 05:30:16.311101 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:30:16 crc kubenswrapper[4871]: I1126 05:30:16.411730 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xb2lw" Nov 26 05:30:16 crc kubenswrapper[4871]: I1126 05:30:16.412694 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2wpn8" Nov 26 05:31:23 crc kubenswrapper[4871]: I1126 05:31:23.615651 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:31:23 crc kubenswrapper[4871]: I1126 05:31:23.616458 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:31:53 crc kubenswrapper[4871]: I1126 05:31:53.614735 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:31:53 crc kubenswrapper[4871]: I1126 05:31:53.615353 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:32:23 crc kubenswrapper[4871]: I1126 05:32:23.614723 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:32:23 crc kubenswrapper[4871]: I1126 05:32:23.615448 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:32:23 crc kubenswrapper[4871]: I1126 05:32:23.615572 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:32:23 crc kubenswrapper[4871]: I1126 05:32:23.616412 4871 kuberuntime_manager.go:1027] "Message 
for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a526a4481d162e6a9e1a274d55add2a702076a153538d8c5c161152ee4344647"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 05:32:23 crc kubenswrapper[4871]: I1126 05:32:23.616566 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://a526a4481d162e6a9e1a274d55add2a702076a153538d8c5c161152ee4344647" gracePeriod=600 Nov 26 05:32:24 crc kubenswrapper[4871]: I1126 05:32:24.239859 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="a526a4481d162e6a9e1a274d55add2a702076a153538d8c5c161152ee4344647" exitCode=0 Nov 26 05:32:24 crc kubenswrapper[4871]: I1126 05:32:24.239923 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"a526a4481d162e6a9e1a274d55add2a702076a153538d8c5c161152ee4344647"} Nov 26 05:32:24 crc kubenswrapper[4871]: I1126 05:32:24.240521 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"73505ee26772aa1df09c89bae702b19bd7861dae0e72aa5f1011d13c2064a8d5"} Nov 26 05:32:24 crc kubenswrapper[4871]: I1126 05:32:24.240559 4871 scope.go:117] "RemoveContainer" containerID="3a256a9b9df537ef4245d9aeab99b6c62bf0736a39ba9d53b7f698232b37c417" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.471665 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-xm6rq"] Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.474432 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.501341 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-xm6rq"] Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.664129 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.664189 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.664270 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.664303 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-bound-sa-token\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.664327 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-registry-tls\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.664367 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-registry-certificates\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.664389 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-trusted-ca\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.664428 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wwbq\" (UniqueName: 
\"kubernetes.io/projected/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-kube-api-access-4wwbq\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.689238 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.765576 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-bound-sa-token\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.765631 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-registry-tls\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.765679 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-registry-certificates\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.765701 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-trusted-ca\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.765750 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wwbq\" (UniqueName: \"kubernetes.io/projected/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-kube-api-access-4wwbq\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.765800 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.765825 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.766463 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-ca-trust-extracted\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.767032 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-trusted-ca\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.767169 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-registry-certificates\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.771150 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-registry-tls\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.772269 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-installation-pull-secrets\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.788730 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wwbq\" (UniqueName: \"kubernetes.io/projected/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-kube-api-access-4wwbq\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.791472 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7c3e0445-522c-4c4c-adca-d3c1b4a905b6-bound-sa-token\") pod \"image-registry-66df7c8f76-xm6rq\" (UID: \"7c3e0445-522c-4c4c-adca-d3c1b4a905b6\") " pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.793588 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:50 crc kubenswrapper[4871]: I1126 05:32:50.953262 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-xm6rq"] Nov 26 05:32:51 crc kubenswrapper[4871]: I1126 05:32:51.432882 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" event={"ID":"7c3e0445-522c-4c4c-adca-d3c1b4a905b6","Type":"ContainerStarted","Data":"d77c8456fb59231e0457df51acd74a04357f327a14af4dcf94ee6621c18e65aa"} Nov 26 05:32:51 crc kubenswrapper[4871]: I1126 05:32:51.433008 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" event={"ID":"7c3e0445-522c-4c4c-adca-d3c1b4a905b6","Type":"ContainerStarted","Data":"aad6701a6bc67985b616ef8c408da564a1453205e1f4e07241d05d9e368f1ecf"} Nov 26 05:32:51 crc kubenswrapper[4871]: I1126 05:32:51.433204 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:32:51 crc kubenswrapper[4871]: I1126 05:32:51.469821 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" podStartSLOduration=1.469793323 podStartE2EDuration="1.469793323s" podCreationTimestamp="2025-11-26 05:32:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:32:51.462771774 +0000 UTC m=+429.645823400" watchObservedRunningTime="2025-11-26 05:32:51.469793323 +0000 UTC m=+429.652844949" Nov 26 05:33:10 crc kubenswrapper[4871]: I1126 05:33:10.802028 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-xm6rq" Nov 26 05:33:10 crc kubenswrapper[4871]: I1126 05:33:10.871483 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tcqk7"] Nov 26 05:33:35 crc kubenswrapper[4871]: I1126 05:33:35.928036 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" podUID="0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" containerName="registry" containerID="cri-o://f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d" gracePeriod=30 Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.364049 4871 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.475009 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-tls\") pod \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") "
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.475099 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-trusted-ca\") pod \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") "
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.475128 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fdrl\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-kube-api-access-9fdrl\") pod \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") "
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.475154 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-installation-pull-secrets\") pod \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") "
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.475416 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") "
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.475568 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-ca-trust-extracted\") pod \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") "
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.475609 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-bound-sa-token\") pod \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") "
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.475654 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-certificates\") pod \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\" (UID: \"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03\") "
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.476618 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.476997 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.483713 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.484567 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-kube-api-access-9fdrl" (OuterVolumeSpecName: "kube-api-access-9fdrl") pod "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03"). InnerVolumeSpecName "kube-api-access-9fdrl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.484749 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.484978 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.491077 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.504758 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" (UID: "0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.577013 4871 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.577069 4871 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.577088 4871 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-certificates\") on node \"crc\" DevicePath \"\""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.577114 4871 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-registry-tls\") on node \"crc\" DevicePath \"\""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.577133 4871 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.577151 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fdrl\" (UniqueName: \"kubernetes.io/projected/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-kube-api-access-9fdrl\") on node \"crc\" DevicePath \"\""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.577168 4871 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.763622 4871 generic.go:334] "Generic (PLEG): container finished" podID="0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" containerID="f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d" exitCode=0
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.763692 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" event={"ID":"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03","Type":"ContainerDied","Data":"f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d"}
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.763727 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7"
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.763750 4871 scope.go:117] "RemoveContainer" containerID="f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d"
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.763734 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-tcqk7" event={"ID":"0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03","Type":"ContainerDied","Data":"fff78e86fe23eeab4c60eec2e48fc6dffb8c6ee990a4bf26d08094451c5aaa9f"}
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.799646 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tcqk7"]
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.800226 4871 scope.go:117] "RemoveContainer" containerID="f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d"
Nov 26 05:33:36 crc kubenswrapper[4871]: E1126 05:33:36.800905 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d\": container with ID starting with f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d not found: ID does not exist" containerID="f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d"
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.800958 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d"} err="failed to get container status \"f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d\": rpc error: code = NotFound desc = could not find container \"f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d\": container with ID starting with f3a15f5678b2b28216202a91e5ee144d1da708e54e6139d11b352a185b87917d not found: ID does not exist"
Nov 26 05:33:36 crc kubenswrapper[4871]: I1126 05:33:36.805507 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-tcqk7"]
Nov 26 05:33:38 crc kubenswrapper[4871]: I1126 05:33:38.516357 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" path="/var/lib/kubelet/pods/0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03/volumes"
Nov 26 05:34:23 crc kubenswrapper[4871]: I1126 05:34:23.614657 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 05:34:23 crc kubenswrapper[4871]: I1126 05:34:23.615388 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 05:34:42 crc kubenswrapper[4871]: I1126 05:34:42.734417 4871 scope.go:117] "RemoveContainer" containerID="9a8bd9ec524d6d172b911cd75905952ca08aadc1705cdeb4c3d7d4c7706b7fc4"
Nov 26 05:34:53 crc kubenswrapper[4871]: I1126 05:34:53.615047 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 05:34:53 crc kubenswrapper[4871]: I1126 05:34:53.616720 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
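Both liveness failures above are raw TCP connection refusals: nothing was listening on 127.0.0.1:8798 when the prober dialed, so the HTTP GET never even started. For an httpGet probe the kubelet counts any response in the 200-399 range as success. A hypothetical stand-in for such a /health endpoint (not machine-config-daemon's actual code) is just:

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK) // any 2xx/3xx status passes the probe
	})
	// Listen on the exact address the prober dials; while this process is
	// down or not yet bound, probes fail with "connect: connection refused".
	log.Fatal(http.ListenAndServe("127.0.0.1:8798", nil))
}
```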
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.309806 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-v7fsr"]
Nov 26 05:35:05 crc kubenswrapper[4871]: E1126 05:35:05.310608 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" containerName="registry"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.310628 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" containerName="registry"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.310810 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e6bd42b-5a3b-4cac-9ee9-9ea1ae6e6d03" containerName="registry"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.311365 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-v7fsr"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.316055 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.316133 4871 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-8wfbp"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.317385 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.319012 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-v7fsr"]
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.325709 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gkprb"]
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.326914 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-gkprb"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.331340 4871 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-2m8lv"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.342741 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gkprb"]
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.361174 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-2v767"]
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.362514 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-2v767"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.370814 4871 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-9m6k8"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.383103 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-2v767"]
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.464888 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjgq4\" (UniqueName: \"kubernetes.io/projected/c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc-kube-api-access-zjgq4\") pod \"cert-manager-5b446d88c5-gkprb\" (UID: \"c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc\") " pod="cert-manager/cert-manager-5b446d88c5-gkprb"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.464971 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7trx\" (UniqueName: \"kubernetes.io/projected/6a758ba2-2916-440d-9a57-149111e0ff4c-kube-api-access-n7trx\") pod \"cert-manager-webhook-5655c58dd6-2v767\" (UID: \"6a758ba2-2916-440d-9a57-149111e0ff4c\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-2v767"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.465005 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p25ps\" (UniqueName: \"kubernetes.io/projected/250180c0-d204-44e0-83b1-64259ea3bd68-kube-api-access-p25ps\") pod \"cert-manager-cainjector-7f985d654d-v7fsr\" (UID: \"250180c0-d204-44e0-83b1-64259ea3bd68\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-v7fsr"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.566926 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p25ps\" (UniqueName: \"kubernetes.io/projected/250180c0-d204-44e0-83b1-64259ea3bd68-kube-api-access-p25ps\") pod \"cert-manager-cainjector-7f985d654d-v7fsr\" (UID: \"250180c0-d204-44e0-83b1-64259ea3bd68\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-v7fsr"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.567112 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjgq4\" (UniqueName: \"kubernetes.io/projected/c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc-kube-api-access-zjgq4\") pod \"cert-manager-5b446d88c5-gkprb\" (UID: \"c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc\") " pod="cert-manager/cert-manager-5b446d88c5-gkprb"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.567161 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7trx\" (UniqueName: \"kubernetes.io/projected/6a758ba2-2916-440d-9a57-149111e0ff4c-kube-api-access-n7trx\") pod \"cert-manager-webhook-5655c58dd6-2v767\" (UID: \"6a758ba2-2916-440d-9a57-149111e0ff4c\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-2v767"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.586787 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjgq4\" (UniqueName: \"kubernetes.io/projected/c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc-kube-api-access-zjgq4\") pod \"cert-manager-5b446d88c5-gkprb\" (UID: \"c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc\") " pod="cert-manager/cert-manager-5b446d88c5-gkprb"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.588327 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7trx\" (UniqueName: \"kubernetes.io/projected/6a758ba2-2916-440d-9a57-149111e0ff4c-kube-api-access-n7trx\") pod \"cert-manager-webhook-5655c58dd6-2v767\" (UID: \"6a758ba2-2916-440d-9a57-149111e0ff4c\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-2v767"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.588453 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p25ps\" (UniqueName: \"kubernetes.io/projected/250180c0-d204-44e0-83b1-64259ea3bd68-kube-api-access-p25ps\") pod \"cert-manager-cainjector-7f985d654d-v7fsr\" (UID: \"250180c0-d204-44e0-83b1-64259ea3bd68\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-v7fsr"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.635144 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-v7fsr"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.660605 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-gkprb"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.686205 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-2v767"
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.914086 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-2v767"]
Nov 26 05:35:05 crc kubenswrapper[4871]: I1126 05:35:05.923731 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 05:35:06 crc kubenswrapper[4871]: I1126 05:35:06.056661 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-v7fsr"]
Nov 26 05:35:06 crc kubenswrapper[4871]: W1126 05:35:06.058223 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod250180c0_d204_44e0_83b1_64259ea3bd68.slice/crio-04026f6eddd1ae6caefb138bfbe73bca39972450c919185b2160999fe64e823c WatchSource:0}: Error finding container 04026f6eddd1ae6caefb138bfbe73bca39972450c919185b2160999fe64e823c: Status 404 returned error can't find the container with id 04026f6eddd1ae6caefb138bfbe73bca39972450c919185b2160999fe64e823c
Nov 26 05:35:06 crc kubenswrapper[4871]: I1126 05:35:06.088418 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gkprb"]
Nov 26 05:35:06 crc kubenswrapper[4871]: I1126 05:35:06.381724 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-gkprb" event={"ID":"c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc","Type":"ContainerStarted","Data":"c43af0de36c4ec1a77980b819589b1c1b3f68c89ac23b43ba586122de354482e"}
Nov 26 05:35:06 crc kubenswrapper[4871]: I1126 05:35:06.383836 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-v7fsr" event={"ID":"250180c0-d204-44e0-83b1-64259ea3bd68","Type":"ContainerStarted","Data":"04026f6eddd1ae6caefb138bfbe73bca39972450c919185b2160999fe64e823c"}
Nov 26 05:35:06 crc kubenswrapper[4871]: I1126 05:35:06.385130 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-2v767" event={"ID":"6a758ba2-2916-440d-9a57-149111e0ff4c","Type":"ContainerStarted","Data":"101d5e30a9bd9009066a9cd5a5906538bb952f8976ca3ef81c89514f9cb179a1"}
Nov 26 05:35:09 crc kubenswrapper[4871]: I1126 05:35:09.407588 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-2v767" event={"ID":"6a758ba2-2916-440d-9a57-149111e0ff4c","Type":"ContainerStarted","Data":"260ff5ab8e7cbe7c7aec05c7797c4e41630771df3007a109a5a5b6a1a9e55674"}
Nov 26 05:35:09 crc kubenswrapper[4871]: I1126 05:35:09.409078 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-2v767"
Nov 26 05:35:09 crc kubenswrapper[4871]: I1126 05:35:09.436391 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-2v767" podStartSLOduration=1.171162648 podStartE2EDuration="4.436367435s" podCreationTimestamp="2025-11-26 05:35:05 +0000 UTC" firstStartedPulling="2025-11-26 05:35:05.923427894 +0000 UTC m=+564.106479480" lastFinishedPulling="2025-11-26 05:35:09.188632671 +0000 UTC m=+567.371684267" observedRunningTime="2025-11-26 05:35:09.432687661 +0000 UTC m=+567.615739257" watchObservedRunningTime="2025-11-26 05:35:09.436367435 +0000 UTC m=+567.619419031"
Nov 26 05:35:10 crc kubenswrapper[4871]: I1126 05:35:10.417598 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-gkprb" event={"ID":"c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc","Type":"ContainerStarted","Data":"9f3ef0ff30939fda3c271aa2711d0d90db6d3504c75f78d87d56ec764bd52138"}
Nov 26 05:35:10 crc kubenswrapper[4871]: I1126 05:35:10.420160 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-v7fsr" event={"ID":"250180c0-d204-44e0-83b1-64259ea3bd68","Type":"ContainerStarted","Data":"1c511dca4f4fc206956567d200e0138c02be4963624d7263a04748355e10b845"}
Nov 26 05:35:10 crc kubenswrapper[4871]: I1126 05:35:10.446791 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-gkprb" podStartSLOduration=2.273706722 podStartE2EDuration="5.446759074s" podCreationTimestamp="2025-11-26 05:35:05 +0000 UTC" firstStartedPulling="2025-11-26 05:35:06.095125623 +0000 UTC m=+564.278177209" lastFinishedPulling="2025-11-26 05:35:09.268177935 +0000 UTC m=+567.451229561" observedRunningTime="2025-11-26 05:35:10.437309253 +0000 UTC m=+568.620360879" watchObservedRunningTime="2025-11-26 05:35:10.446759074 +0000 UTC m=+568.629810720"
Nov 26 05:35:10 crc kubenswrapper[4871]: I1126 05:35:10.479736 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-v7fsr" podStartSLOduration=2.351184293 podStartE2EDuration="5.479713912s" podCreationTimestamp="2025-11-26 05:35:05 +0000 UTC" firstStartedPulling="2025-11-26 05:35:06.060332847 +0000 UTC m=+564.243384433" lastFinishedPulling="2025-11-26 05:35:09.188862446 +0000 UTC m=+567.371914052" observedRunningTime="2025-11-26 05:35:10.460996916 +0000 UTC m=+568.644048522" watchObservedRunningTime="2025-11-26 05:35:10.479713912 +0000 UTC m=+568.662765508"
Nov 26 05:35:15 crc kubenswrapper[4871]: I1126 05:35:15.690141 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-2v767"
Nov 26 05:35:15 crc kubenswrapper[4871]: I1126 05:35:15.939806 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qzw7d"]
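When digging through lines like these, most of the signal sits in the key="value" pairs klog appends to each structured message. A small extraction helper makes grepping for a pod or container mechanical (illustrative tooling, not part of any Kubernetes API):

```go
package main

import (
	"fmt"
	"regexp"
)

// Matches klog-style key="value" pairs, allowing escaped quotes in values.
var kv = regexp.MustCompile(`([A-Za-z0-9_]+)="((?:[^"\\]|\\.)*)"`)

func main() {
	line := `I1126 05:35:09.409078 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-2v767"`
	for _, m := range kv.FindAllStringSubmatch(line, -1) {
		fmt.Printf("%s = %q\n", m[1], m[2]) // probe, status, pod
	}
}
```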
pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovn-controller" containerID="cri-o://386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa" gracePeriod=30 Nov 26 05:35:15 crc kubenswrapper[4871]: I1126 05:35:15.940338 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="nbdb" containerID="cri-o://30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea" gracePeriod=30 Nov 26 05:35:15 crc kubenswrapper[4871]: I1126 05:35:15.940420 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="sbdb" containerID="cri-o://8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647" gracePeriod=30 Nov 26 05:35:15 crc kubenswrapper[4871]: I1126 05:35:15.940421 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452" gracePeriod=30 Nov 26 05:35:15 crc kubenswrapper[4871]: I1126 05:35:15.940512 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="northd" containerID="cri-o://1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c" gracePeriod=30 Nov 26 05:35:15 crc kubenswrapper[4871]: I1126 05:35:15.940571 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovn-acl-logging" containerID="cri-o://1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d" gracePeriod=30 Nov 26 05:35:15 crc kubenswrapper[4871]: I1126 05:35:15.940728 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="kube-rbac-proxy-node" containerID="cri-o://e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84" gracePeriod=30 Nov 26 05:35:15 crc kubenswrapper[4871]: I1126 05:35:15.968623 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller" containerID="cri-o://2728c981c0552e6cd2d6812b668022b9e869813f5e36a80a43b5b3070b2872f2" gracePeriod=30 Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.466018 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovnkube-controller/3.log" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.471927 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovn-acl-logging/0.log" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.472952 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovn-controller/0.log" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.473894 
4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="2728c981c0552e6cd2d6812b668022b9e869813f5e36a80a43b5b3070b2872f2" exitCode=0 Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474011 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647" exitCode=0 Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474304 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea" exitCode=0 Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474321 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c" exitCode=0 Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474330 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452" exitCode=0 Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474338 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84" exitCode=0 Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474347 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d" exitCode=143 Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474355 4871 generic.go:334] "Generic (PLEG): container finished" podID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerID="386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa" exitCode=143 Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.473939 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"2728c981c0552e6cd2d6812b668022b9e869813f5e36a80a43b5b3070b2872f2"} Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474438 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647"} Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474458 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea"} Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474471 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c"} Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474483 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" 
event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452"} Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474495 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84"} Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474505 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d"} Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474543 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa"} Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.474563 4871 scope.go:117] "RemoveContainer" containerID="165ef93f065973ad6c896ad290235fa4a66d891d5829d3e2d689c49f7e8951ef" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.477781 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rpr6z_84290973-bc95-4326-bacd-7c210346620a/kube-multus/2.log" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.478761 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rpr6z_84290973-bc95-4326-bacd-7c210346620a/kube-multus/1.log" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.478839 4871 generic.go:334] "Generic (PLEG): container finished" podID="84290973-bc95-4326-bacd-7c210346620a" containerID="417bc65daf58d30ae61be4b4c6e5b7a604e2c0b9c899b8c31c9d9fe1276ba648" exitCode=2 Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.478887 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rpr6z" event={"ID":"84290973-bc95-4326-bacd-7c210346620a","Type":"ContainerDied","Data":"417bc65daf58d30ae61be4b4c6e5b7a604e2c0b9c899b8c31c9d9fe1276ba648"} Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.479675 4871 scope.go:117] "RemoveContainer" containerID="417bc65daf58d30ae61be4b4c6e5b7a604e2c0b9c899b8c31c9d9fe1276ba648" Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.480092 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-rpr6z_openshift-multus(84290973-bc95-4326-bacd-7c210346620a)\"" pod="openshift-multus/multus-rpr6z" podUID="84290973-bc95-4326-bacd-7c210346620a" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.509196 4871 scope.go:117] "RemoveContainer" containerID="2c10dc36740ec87314e3a58a4a96133df5ecb4a901474b032895bb318b2c3ca6" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.624238 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovn-acl-logging/0.log" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.625217 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovn-controller/0.log" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.626305 
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.626305 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708302 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rfsjj"]
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708638 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="kube-rbac-proxy-ovn-metrics"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708662 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="kube-rbac-proxy-ovn-metrics"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708681 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovn-acl-logging"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708693 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovn-acl-logging"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708710 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708722 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708737 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="kubecfg-setup"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708749 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="kubecfg-setup"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708764 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="nbdb"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708776 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="nbdb"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708795 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708807 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708823 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708835 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708848 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708861 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708880 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="kube-rbac-proxy-node"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708893 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="kube-rbac-proxy-node"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708910 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovn-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708922 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovn-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708935 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="northd"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708948 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="northd"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.708972 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="sbdb"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.708984 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="sbdb"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709137 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709158 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovn-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709173 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709189 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709202 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709219 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="kube-rbac-proxy-node"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709231 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="nbdb"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709251 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="kube-rbac-proxy-ovn-metrics"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709270 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovn-acl-logging"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709288 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="sbdb"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709303 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="northd"
Nov 26 05:35:16 crc kubenswrapper[4871]: E1126 05:35:16.709457 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709471 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.709673 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" containerName="ovnkube-controller"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.716826 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj"
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743003 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-ovn\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") "
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743061 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-node-log\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") "
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743099 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-netd\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") "
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743135 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-log-socket\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") "
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743180 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-netns\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") "
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743216 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-bin\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") "
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743250 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nzm5\" (UniqueName: \"kubernetes.io/projected/6a0aba42-7edc-4d81-850e-3e3439eeaec8-kube-api-access-9nzm5\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") "
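The RemoveStaleState burst above is the CPU and memory managers dropping per-container accounting for the old ovnkube-node-qzw7d pod UID before admitting its replacement, ovnkube-node-rfsjj. The bookkeeping amounts to deleting map entries keyed by (podUID, containerName) for pods no longer on the node; a toy version of that cleanup (illustrative types, not the managers' real state structures):

```go
package main

import "fmt"

type key struct{ podUID, container string }

func main() {
	state := map[key]string{
		{"6a0aba42-7edc-4d81-850e-3e3439eeaec8", "nbdb"}:   "cpuset 0-3",
		{"6a0aba42-7edc-4d81-850e-3e3439eeaec8", "northd"}: "cpuset 0-3",
	}
	// Only the replacement pod's UID is still active on the node.
	active := map[string]bool{"1d5d512e-0f72-416b-a18e-d8eccbd57242": true}

	for k := range state { // deleting during range is safe in Go
		if !active[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %q of pod %s\n", k.container, k.podUID)
			delete(state, k)
		}
	}
}
```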
"operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-slash\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743304 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-etc-openvswitch\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743339 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-config\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743366 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-var-lib-cni-networks-ovn-kubernetes\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743396 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-env-overrides\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743425 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-var-lib-openvswitch\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743457 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovn-node-metrics-cert\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743501 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-openvswitch\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743550 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-systemd-units\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743590 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-ovn-kubernetes\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") " Nov 26 05:35:16 crc 
kubenswrapper[4871]: I1126 05:35:16.743631 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-systemd\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") "
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743673 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-script-lib\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") "
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743703 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-kubelet\") pod \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\" (UID: \"6a0aba42-7edc-4d81-850e-3e3439eeaec8\") "
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.743991 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.744053 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.744442 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.744462 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.744553 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.744925 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.745084 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-node-log" (OuterVolumeSpecName: "node-log") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.745148 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.745192 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-log-socket" (OuterVolumeSpecName: "log-socket") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.745209 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.745256 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.745298 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.745298 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.745335 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.745896 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-slash" (OuterVolumeSpecName: "host-slash") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.746586 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.746638 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.751275 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.751424 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a0aba42-7edc-4d81-850e-3e3439eeaec8-kube-api-access-9nzm5" (OuterVolumeSpecName: "kube-api-access-9nzm5") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "kube-api-access-9nzm5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.770476 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "6a0aba42-7edc-4d81-850e-3e3439eeaec8" (UID: "6a0aba42-7edc-4d81-850e-3e3439eeaec8"). InnerVolumeSpecName "run-systemd".
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.845485 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-run-systemd\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.845602 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-node-log\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.845659 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rq2b\" (UniqueName: \"kubernetes.io/projected/1d5d512e-0f72-416b-a18e-d8eccbd57242-kube-api-access-6rq2b\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.845738 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1d5d512e-0f72-416b-a18e-d8eccbd57242-ovnkube-script-lib\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.845771 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1d5d512e-0f72-416b-a18e-d8eccbd57242-ovnkube-config\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.845826 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-run-ovn\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.845865 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.845945 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1d5d512e-0f72-416b-a18e-d8eccbd57242-env-overrides\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.845986 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/1d5d512e-0f72-416b-a18e-d8eccbd57242-ovn-node-metrics-cert\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846029 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-systemd-units\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846059 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-cni-bin\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846094 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-var-lib-openvswitch\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846124 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-kubelet\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846156 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-slash\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846300 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-run-netns\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846340 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-run-ovn-kubernetes\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846366 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-log-socket\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846388 4871 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-etc-openvswitch\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846415 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-run-openvswitch\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846437 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-cni-netd\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846758 4871 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846814 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nzm5\" (UniqueName: \"kubernetes.io/projected/6a0aba42-7edc-4d81-850e-3e3439eeaec8-kube-api-access-9nzm5\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846838 4871 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-slash\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846856 4871 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846880 4871 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846900 4871 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846918 4871 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846937 4871 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846956 4871 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846974 4871 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.846993 4871 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.847011 4871 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.847030 4871 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.847048 4871 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6a0aba42-7edc-4d81-850e-3e3439eeaec8-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.847067 4871 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.847085 4871 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.847102 4871 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-node-log\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.847120 4871 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.847139 4871 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-log-socket\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.847157 4871 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6a0aba42-7edc-4d81-850e-3e3439eeaec8-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.948864 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1d5d512e-0f72-416b-a18e-d8eccbd57242-ovnkube-script-lib\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.948958 4871 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1d5d512e-0f72-416b-a18e-d8eccbd57242-ovnkube-config\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949046 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-run-ovn\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949100 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949163 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1d5d512e-0f72-416b-a18e-d8eccbd57242-env-overrides\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949203 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1d5d512e-0f72-416b-a18e-d8eccbd57242-ovn-node-metrics-cert\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949255 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-cni-bin\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949297 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-systemd-units\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949342 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-var-lib-openvswitch\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949353 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949388 4871 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-kubelet\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949431 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-slash\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949481 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-run-netns\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949494 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-run-ovn\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949569 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-run-ovn-kubernetes\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949624 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-systemd-units\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949628 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-log-socket\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949728 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-run-netns\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949762 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-etc-openvswitch\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949801 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-run-ovn-kubernetes\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949842 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-run-openvswitch\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949859 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-kubelet\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949810 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-var-lib-openvswitch\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949894 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-cni-netd\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949948 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-run-systemd\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949999 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-cni-bin\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.950002 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-node-log\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.950053 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-node-log\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949767 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-slash\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.950082 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rq2b\" (UniqueName: \"kubernetes.io/projected/1d5d512e-0f72-416b-a18e-d8eccbd57242-kube-api-access-6rq2b\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.950204 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-run-openvswitch\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.950269 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-host-cni-netd\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.950328 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-run-systemd\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949954 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-etc-openvswitch\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.949682 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/1d5d512e-0f72-416b-a18e-d8eccbd57242-log-socket\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.950603 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/1d5d512e-0f72-416b-a18e-d8eccbd57242-ovnkube-script-lib\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.950814 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1d5d512e-0f72-416b-a18e-d8eccbd57242-ovnkube-config\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.951270 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1d5d512e-0f72-416b-a18e-d8eccbd57242-env-overrides\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.955614 4871 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1d5d512e-0f72-416b-a18e-d8eccbd57242-ovn-node-metrics-cert\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:16 crc kubenswrapper[4871]: I1126 05:35:16.975632 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rq2b\" (UniqueName: \"kubernetes.io/projected/1d5d512e-0f72-416b-a18e-d8eccbd57242-kube-api-access-6rq2b\") pod \"ovnkube-node-rfsjj\" (UID: \"1d5d512e-0f72-416b-a18e-d8eccbd57242\") " pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.044662 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:17 crc kubenswrapper[4871]: W1126 05:35:17.083519 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d5d512e_0f72_416b_a18e_d8eccbd57242.slice/crio-00ed1f4d23c016f1d08135c116378a4af936c4ccbf8d0188b5eb3aad857d7e28 WatchSource:0}: Error finding container 00ed1f4d23c016f1d08135c116378a4af936c4ccbf8d0188b5eb3aad857d7e28: Status 404 returned error can't find the container with id 00ed1f4d23c016f1d08135c116378a4af936c4ccbf8d0188b5eb3aad857d7e28 Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.487081 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rpr6z_84290973-bc95-4326-bacd-7c210346620a/kube-multus/2.log" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.492211 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovn-acl-logging/0.log" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.493024 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qzw7d_6a0aba42-7edc-4d81-850e-3e3439eeaec8/ovn-controller/0.log" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.493483 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" event={"ID":"6a0aba42-7edc-4d81-850e-3e3439eeaec8","Type":"ContainerDied","Data":"20b0d04aa609bd9448f9eebd0c8d8d4d04c75efa3c26ab1247d21da581e89bc6"} Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.493586 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qzw7d" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.493664 4871 scope.go:117] "RemoveContainer" containerID="2728c981c0552e6cd2d6812b668022b9e869813f5e36a80a43b5b3070b2872f2" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.495889 4871 generic.go:334] "Generic (PLEG): container finished" podID="1d5d512e-0f72-416b-a18e-d8eccbd57242" containerID="22e92190bc1ae2ae49be98eddcdad35b9da60c75d967d5763ed19f3451aeff74" exitCode=0 Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.495962 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" event={"ID":"1d5d512e-0f72-416b-a18e-d8eccbd57242","Type":"ContainerDied","Data":"22e92190bc1ae2ae49be98eddcdad35b9da60c75d967d5763ed19f3451aeff74"} Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.496048 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" event={"ID":"1d5d512e-0f72-416b-a18e-d8eccbd57242","Type":"ContainerStarted","Data":"00ed1f4d23c016f1d08135c116378a4af936c4ccbf8d0188b5eb3aad857d7e28"} Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.526101 4871 scope.go:117] "RemoveContainer" containerID="8344f3a2c68dc9ec84a97efff9308bf7f73d5fcce4935d5ec6dd71e7f5597647" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.564969 4871 scope.go:117] "RemoveContainer" containerID="30744fa7da0302cee90e4567d1a72afc1a68e8f1fe05e0db133aab389fda61ea" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.564974 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qzw7d"] Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.575759 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qzw7d"] Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.598588 4871 scope.go:117] "RemoveContainer" containerID="1283b947d514fcb87c31fcfac6cbdb7858015f0536bdb132750a32b1088f856c" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.613391 4871 scope.go:117] "RemoveContainer" containerID="3a888a927264c5de0abf66a61074bafa506e2793dcc45f978424655c20123452" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.627693 4871 scope.go:117] "RemoveContainer" containerID="e12a06723a912cca02b3b66875ddfe85669489b6e54899a73a54c545286dbb84" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.639425 4871 scope.go:117] "RemoveContainer" containerID="1b17f937b01a3a3e1200b287d001bc38c5582f324a2f581a249ecb60aafeb02d" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.651993 4871 scope.go:117] "RemoveContainer" containerID="386da56139c45bdfd9056017728cf1aad6aa2c07c6b388fef0857a02ae9a9efa" Nov 26 05:35:17 crc kubenswrapper[4871]: I1126 05:35:17.663587 4871 scope.go:117] "RemoveContainer" containerID="0bba26a5278d45a3368ec9793093a48d0e088e1e0b857cb5fe897228797f3c57" Nov 26 05:35:18 crc kubenswrapper[4871]: I1126 05:35:18.521617 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a0aba42-7edc-4d81-850e-3e3439eeaec8" path="/var/lib/kubelet/pods/6a0aba42-7edc-4d81-850e-3e3439eeaec8/volumes" Nov 26 05:35:18 crc kubenswrapper[4871]: I1126 05:35:18.524183 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" event={"ID":"1d5d512e-0f72-416b-a18e-d8eccbd57242","Type":"ContainerStarted","Data":"47d7eda8cb0891a145141934e3fdd60f9669c01a86c73f892fd41a78ce7b4873"} Nov 26 05:35:18 crc kubenswrapper[4871]: I1126 05:35:18.524276 4871 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" event={"ID":"1d5d512e-0f72-416b-a18e-d8eccbd57242","Type":"ContainerStarted","Data":"5e525b0e892dc6677c66d1c291e01bd9d289390a83f43dd4797ae1e447588d07"} Nov 26 05:35:18 crc kubenswrapper[4871]: I1126 05:35:18.524310 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" event={"ID":"1d5d512e-0f72-416b-a18e-d8eccbd57242","Type":"ContainerStarted","Data":"db7db982171e260aa31523010fc648f31f1f3fb988506ebfcb2fa8048adefb70"} Nov 26 05:35:18 crc kubenswrapper[4871]: I1126 05:35:18.524330 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" event={"ID":"1d5d512e-0f72-416b-a18e-d8eccbd57242","Type":"ContainerStarted","Data":"acfc6a34c4d431339acabf64e5383f8279038a9e5fe40113087639580bdd691e"} Nov 26 05:35:18 crc kubenswrapper[4871]: I1126 05:35:18.524348 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" event={"ID":"1d5d512e-0f72-416b-a18e-d8eccbd57242","Type":"ContainerStarted","Data":"2b224646b315041462068f7cba9237a0ff19e1be89cbf396e677159cc7b12313"} Nov 26 05:35:18 crc kubenswrapper[4871]: I1126 05:35:18.524365 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" event={"ID":"1d5d512e-0f72-416b-a18e-d8eccbd57242","Type":"ContainerStarted","Data":"27c47a0c1bdddd75be25d8b46c5dcfb4bab7b9488aa3913778f3a1aa4d7c76c5"} Nov 26 05:35:21 crc kubenswrapper[4871]: I1126 05:35:21.549072 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" event={"ID":"1d5d512e-0f72-416b-a18e-d8eccbd57242","Type":"ContainerStarted","Data":"13a47abaf0e62e297f6709ec207d8d97b8692024af22d06164c607828efd662b"} Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.580234 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" event={"ID":"1d5d512e-0f72-416b-a18e-d8eccbd57242","Type":"ContainerStarted","Data":"b663be58228c8f88134f4c3fbb4bdf19349c6dd08b047b81ede356898c8251ff"} Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.580704 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.580741 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.580765 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.615593 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.615679 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.615744 4871 kubelet.go:2542] 
"SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.616682 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"73505ee26772aa1df09c89bae702b19bd7861dae0e72aa5f1011d13c2064a8d5"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.616795 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://73505ee26772aa1df09c89bae702b19bd7861dae0e72aa5f1011d13c2064a8d5" gracePeriod=600 Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.633812 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" podStartSLOduration=7.633788632 podStartE2EDuration="7.633788632s" podCreationTimestamp="2025-11-26 05:35:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:35:23.629823831 +0000 UTC m=+581.812875447" watchObservedRunningTime="2025-11-26 05:35:23.633788632 +0000 UTC m=+581.816840258" Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.682688 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:23 crc kubenswrapper[4871]: I1126 05:35:23.682966 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:24 crc kubenswrapper[4871]: I1126 05:35:24.587987 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="73505ee26772aa1df09c89bae702b19bd7861dae0e72aa5f1011d13c2064a8d5" exitCode=0 Nov 26 05:35:24 crc kubenswrapper[4871]: I1126 05:35:24.589811 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"73505ee26772aa1df09c89bae702b19bd7861dae0e72aa5f1011d13c2064a8d5"} Nov 26 05:35:24 crc kubenswrapper[4871]: I1126 05:35:24.589865 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"f9b3c6b7dc711fbab7cfc1df233a4b33f288cd38725d31ae281cb8abef183fd7"} Nov 26 05:35:24 crc kubenswrapper[4871]: I1126 05:35:24.589892 4871 scope.go:117] "RemoveContainer" containerID="a526a4481d162e6a9e1a274d55add2a702076a153538d8c5c161152ee4344647" Nov 26 05:35:31 crc kubenswrapper[4871]: I1126 05:35:31.507369 4871 scope.go:117] "RemoveContainer" containerID="417bc65daf58d30ae61be4b4c6e5b7a604e2c0b9c899b8c31c9d9fe1276ba648" Nov 26 05:35:31 crc kubenswrapper[4871]: E1126 05:35:31.508230 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-rpr6z_openshift-multus(84290973-bc95-4326-bacd-7c210346620a)\"" pod="openshift-multus/multus-rpr6z" 
podUID="84290973-bc95-4326-bacd-7c210346620a" Nov 26 05:35:42 crc kubenswrapper[4871]: I1126 05:35:42.967380 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d"] Nov 26 05:35:42 crc kubenswrapper[4871]: I1126 05:35:42.969742 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:42 crc kubenswrapper[4871]: I1126 05:35:42.972459 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 05:35:42 crc kubenswrapper[4871]: I1126 05:35:42.977693 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d"] Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.120895 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.120991 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.121157 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgdht\" (UniqueName: \"kubernetes.io/projected/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-kube-api-access-kgdht\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.222841 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.222968 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.223080 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgdht\" (UniqueName: \"kubernetes.io/projected/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-kube-api-access-kgdht\") pod 
\"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.223487 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.223946 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.248947 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgdht\" (UniqueName: \"kubernetes.io/projected/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-kube-api-access-kgdht\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.301721 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: E1126 05:35:43.339037 4871 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9_0(b5728153705240133a98d53254da6ff635b53d5578b1996753953e39b57aa8ea): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 05:35:43 crc kubenswrapper[4871]: E1126 05:35:43.339154 4871 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9_0(b5728153705240133a98d53254da6ff635b53d5578b1996753953e39b57aa8ea): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: E1126 05:35:43.339193 4871 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9_0(b5728153705240133a98d53254da6ff635b53d5578b1996753953e39b57aa8ea): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: E1126 05:35:43.339342 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace(0acf5a3d-2727-42dd-a502-e7b8ad27a0a9)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace(0acf5a3d-2727-42dd-a502-e7b8ad27a0a9)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9_0(b5728153705240133a98d53254da6ff635b53d5578b1996753953e39b57aa8ea): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" podUID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.725940 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: I1126 05:35:43.726793 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: E1126 05:35:43.763300 4871 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9_0(9f0c9b88378f23236998984446c16dca956e647fc61f5639a94ffa8a0a83dde9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 05:35:43 crc kubenswrapper[4871]: E1126 05:35:43.763391 4871 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9_0(9f0c9b88378f23236998984446c16dca956e647fc61f5639a94ffa8a0a83dde9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: E1126 05:35:43.763431 4871 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9_0(9f0c9b88378f23236998984446c16dca956e647fc61f5639a94ffa8a0a83dde9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:43 crc kubenswrapper[4871]: E1126 05:35:43.763504 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace(0acf5a3d-2727-42dd-a502-e7b8ad27a0a9)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace(0acf5a3d-2727-42dd-a502-e7b8ad27a0a9)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_openshift-marketplace_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9_0(9f0c9b88378f23236998984446c16dca956e647fc61f5639a94ffa8a0a83dde9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" podUID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" Nov 26 05:35:45 crc kubenswrapper[4871]: I1126 05:35:45.506767 4871 scope.go:117] "RemoveContainer" containerID="417bc65daf58d30ae61be4b4c6e5b7a604e2c0b9c899b8c31c9d9fe1276ba648" Nov 26 05:35:45 crc kubenswrapper[4871]: I1126 05:35:45.740970 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rpr6z_84290973-bc95-4326-bacd-7c210346620a/kube-multus/2.log" Nov 26 05:35:45 crc kubenswrapper[4871]: I1126 05:35:45.741286 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rpr6z" event={"ID":"84290973-bc95-4326-bacd-7c210346620a","Type":"ContainerStarted","Data":"3f82e2835967e9916a16c0b649d9c8ed96ed94ac5ff6e7a8d57716c3f58a0598"} Nov 26 05:35:47 crc kubenswrapper[4871]: I1126 05:35:47.084507 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rfsjj" Nov 26 05:35:55 crc kubenswrapper[4871]: I1126 05:35:55.507445 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:55 crc kubenswrapper[4871]: I1126 05:35:55.508475 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:35:55 crc kubenswrapper[4871]: I1126 05:35:55.781886 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d"] Nov 26 05:35:55 crc kubenswrapper[4871]: W1126 05:35:55.788201 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0acf5a3d_2727_42dd_a502_e7b8ad27a0a9.slice/crio-d049a419aa40afddee4ead37b53e78815c43a3fedd0b6cb664e9cb010b7f0a1d WatchSource:0}: Error finding container d049a419aa40afddee4ead37b53e78815c43a3fedd0b6cb664e9cb010b7f0a1d: Status 404 returned error can't find the container with id d049a419aa40afddee4ead37b53e78815c43a3fedd0b6cb664e9cb010b7f0a1d Nov 26 05:35:55 crc kubenswrapper[4871]: I1126 05:35:55.822387 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" event={"ID":"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9","Type":"ContainerStarted","Data":"d049a419aa40afddee4ead37b53e78815c43a3fedd0b6cb664e9cb010b7f0a1d"} Nov 26 05:35:56 crc kubenswrapper[4871]: I1126 05:35:56.832431 4871 generic.go:334] "Generic (PLEG): container finished" podID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" containerID="bf338a797091dd15d45d5e360491e62c935e575f12076854497eb2996574e1fa" exitCode=0 Nov 26 05:35:56 crc kubenswrapper[4871]: I1126 05:35:56.832508 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" event={"ID":"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9","Type":"ContainerDied","Data":"bf338a797091dd15d45d5e360491e62c935e575f12076854497eb2996574e1fa"} Nov 26 05:35:58 crc kubenswrapper[4871]: I1126 05:35:58.849471 4871 generic.go:334] "Generic (PLEG): container finished" podID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" containerID="5b289cd9a494df503e5f1ce7b0c6b160147f930d70cfa80324dd0a0110115fd8" exitCode=0 Nov 26 05:35:58 crc kubenswrapper[4871]: I1126 05:35:58.849885 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" event={"ID":"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9","Type":"ContainerDied","Data":"5b289cd9a494df503e5f1ce7b0c6b160147f930d70cfa80324dd0a0110115fd8"} Nov 26 05:35:59 crc kubenswrapper[4871]: I1126 05:35:59.861191 4871 generic.go:334] "Generic (PLEG): container finished" podID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" containerID="f47aa869dfcd37214bc2b1b3fd4076d9f75f69d95e9070ff350137ffc0d82a24" exitCode=0 Nov 26 05:35:59 crc kubenswrapper[4871]: I1126 05:35:59.861327 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" event={"ID":"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9","Type":"ContainerDied","Data":"f47aa869dfcd37214bc2b1b3fd4076d9f75f69d95e9070ff350137ffc0d82a24"} Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.172696 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.285312 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-bundle\") pod \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.285393 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-util\") pod \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.285423 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgdht\" (UniqueName: \"kubernetes.io/projected/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-kube-api-access-kgdht\") pod \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\" (UID: \"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9\") " Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.289520 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-bundle" (OuterVolumeSpecName: "bundle") pod "0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" (UID: "0acf5a3d-2727-42dd-a502-e7b8ad27a0a9"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.293508 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-kube-api-access-kgdht" (OuterVolumeSpecName: "kube-api-access-kgdht") pod "0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" (UID: "0acf5a3d-2727-42dd-a502-e7b8ad27a0a9"). InnerVolumeSpecName "kube-api-access-kgdht". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.387199 4871 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.387234 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgdht\" (UniqueName: \"kubernetes.io/projected/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-kube-api-access-kgdht\") on node \"crc\" DevicePath \"\"" Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.394509 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-util" (OuterVolumeSpecName: "util") pod "0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" (UID: "0acf5a3d-2727-42dd-a502-e7b8ad27a0a9"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.488347 4871 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0acf5a3d-2727-42dd-a502-e7b8ad27a0a9-util\") on node \"crc\" DevicePath \"\"" Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.883099 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" event={"ID":"0acf5a3d-2727-42dd-a502-e7b8ad27a0a9","Type":"ContainerDied","Data":"d049a419aa40afddee4ead37b53e78815c43a3fedd0b6cb664e9cb010b7f0a1d"} Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.883147 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d049a419aa40afddee4ead37b53e78815c43a3fedd0b6cb664e9cb010b7f0a1d" Nov 26 05:36:01 crc kubenswrapper[4871]: I1126 05:36:01.883217 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.846228 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-bmfw7"] Nov 26 05:36:11 crc kubenswrapper[4871]: E1126 05:36:11.847031 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" containerName="util" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.847048 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" containerName="util" Nov 26 05:36:11 crc kubenswrapper[4871]: E1126 05:36:11.847063 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" containerName="extract" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.847072 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" containerName="extract" Nov 26 05:36:11 crc kubenswrapper[4871]: E1126 05:36:11.847092 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" containerName="pull" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.847100 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" containerName="pull" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.847218 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="0acf5a3d-2727-42dd-a502-e7b8ad27a0a9" containerName="extract" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.847669 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-bmfw7" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.849690 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.849920 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-kxnxk" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.855361 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.858871 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-bmfw7"] Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.924507 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjdpg\" (UniqueName: \"kubernetes.io/projected/b9220e8d-267e-4462-b6ea-094a0f724eb3-kube-api-access-jjdpg\") pod \"obo-prometheus-operator-668cf9dfbb-bmfw7\" (UID: \"b9220e8d-267e-4462-b6ea-094a0f724eb3\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-bmfw7" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.971044 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj"] Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.971873 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj" Nov 26 05:36:11 crc kubenswrapper[4871]: W1126 05:36:11.974031 4871 reflector.go:561] object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-ds5gp": failed to list *v1.Secret: secrets "obo-prometheus-operator-admission-webhook-dockercfg-ds5gp" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-operators": no relationship found between node 'crc' and this object Nov 26 05:36:11 crc kubenswrapper[4871]: E1126 05:36:11.974091 4871 reflector.go:158] "Unhandled Error" err="object-\"openshift-operators\"/\"obo-prometheus-operator-admission-webhook-dockercfg-ds5gp\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"obo-prometheus-operator-admission-webhook-dockercfg-ds5gp\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-operators\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 26 05:36:11 crc kubenswrapper[4871]: W1126 05:36:11.975336 4871 reflector.go:561] object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert": failed to list *v1.Secret: secrets "obo-prometheus-operator-admission-webhook-service-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-operators": no relationship found between node 'crc' and this object Nov 26 05:36:11 crc kubenswrapper[4871]: E1126 05:36:11.975363 4871 reflector.go:158] "Unhandled Error" err="object-\"openshift-operators\"/\"obo-prometheus-operator-admission-webhook-service-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"obo-prometheus-operator-admission-webhook-service-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API 
group \"\" in the namespace \"openshift-operators\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.994510 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj"] Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.998685 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5"] Nov 26 05:36:11 crc kubenswrapper[4871]: I1126 05:36:11.999733 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.025472 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjdpg\" (UniqueName: \"kubernetes.io/projected/b9220e8d-267e-4462-b6ea-094a0f724eb3-kube-api-access-jjdpg\") pod \"obo-prometheus-operator-668cf9dfbb-bmfw7\" (UID: \"b9220e8d-267e-4462-b6ea-094a0f724eb3\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-bmfw7" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.029198 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5"] Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.070372 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjdpg\" (UniqueName: \"kubernetes.io/projected/b9220e8d-267e-4462-b6ea-094a0f724eb3-kube-api-access-jjdpg\") pod \"obo-prometheus-operator-668cf9dfbb-bmfw7\" (UID: \"b9220e8d-267e-4462-b6ea-094a0f724eb3\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-bmfw7" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.126520 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3cd1c1e8-5430-4209-a0e2-3176d0ebb70a-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5\" (UID: \"3cd1c1e8-5430-4209-a0e2-3176d0ebb70a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.126600 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3cd1c1e8-5430-4209-a0e2-3176d0ebb70a-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5\" (UID: \"3cd1c1e8-5430-4209-a0e2-3176d0ebb70a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.126640 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/995a4906-508d-4285-b40c-5b14fd9d7b98-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj\" (UID: \"995a4906-508d-4285-b40c-5b14fd9d7b98\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.126672 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/995a4906-508d-4285-b40c-5b14fd9d7b98-webhook-cert\") pod 
\"obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj\" (UID: \"995a4906-508d-4285-b40c-5b14fd9d7b98\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.163889 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-bmfw7" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.165259 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-l8rk7"] Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.166069 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.168015 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-pzwkq" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.168733 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.179302 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-l8rk7"] Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.228253 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3cd1c1e8-5430-4209-a0e2-3176d0ebb70a-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5\" (UID: \"3cd1c1e8-5430-4209-a0e2-3176d0ebb70a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.228321 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/995a4906-508d-4285-b40c-5b14fd9d7b98-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj\" (UID: \"995a4906-508d-4285-b40c-5b14fd9d7b98\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.228356 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/995a4906-508d-4285-b40c-5b14fd9d7b98-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj\" (UID: \"995a4906-508d-4285-b40c-5b14fd9d7b98\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.228379 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g98jr\" (UniqueName: \"kubernetes.io/projected/2e5f535a-ead4-47e3-a477-20cf74b0828a-kube-api-access-g98jr\") pod \"observability-operator-d8bb48f5d-l8rk7\" (UID: \"2e5f535a-ead4-47e3-a477-20cf74b0828a\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.228425 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3cd1c1e8-5430-4209-a0e2-3176d0ebb70a-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5\" (UID: \"3cd1c1e8-5430-4209-a0e2-3176d0ebb70a\") " 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.228448 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/2e5f535a-ead4-47e3-a477-20cf74b0828a-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-l8rk7\" (UID: \"2e5f535a-ead4-47e3-a477-20cf74b0828a\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.284633 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-9f8vw"] Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.285282 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-9f8vw" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.287831 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-cpdzr" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.301458 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-9f8vw"] Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.329066 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g98jr\" (UniqueName: \"kubernetes.io/projected/2e5f535a-ead4-47e3-a477-20cf74b0828a-kube-api-access-g98jr\") pod \"observability-operator-d8bb48f5d-l8rk7\" (UID: \"2e5f535a-ead4-47e3-a477-20cf74b0828a\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.329123 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/2e5f535a-ead4-47e3-a477-20cf74b0828a-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-l8rk7\" (UID: \"2e5f535a-ead4-47e3-a477-20cf74b0828a\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.333140 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/2e5f535a-ead4-47e3-a477-20cf74b0828a-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-l8rk7\" (UID: \"2e5f535a-ead4-47e3-a477-20cf74b0828a\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.343555 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g98jr\" (UniqueName: \"kubernetes.io/projected/2e5f535a-ead4-47e3-a477-20cf74b0828a-kube-api-access-g98jr\") pod \"observability-operator-d8bb48f5d-l8rk7\" (UID: \"2e5f535a-ead4-47e3-a477-20cf74b0828a\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.424105 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-bmfw7"] Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.430168 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/b0a308b8-6586-4d48-b431-ce0c6f46a23e-openshift-service-ca\") pod \"perses-operator-5446b9c989-9f8vw\" (UID: 
\"b0a308b8-6586-4d48-b431-ce0c6f46a23e\") " pod="openshift-operators/perses-operator-5446b9c989-9f8vw" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.430555 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g699z\" (UniqueName: \"kubernetes.io/projected/b0a308b8-6586-4d48-b431-ce0c6f46a23e-kube-api-access-g699z\") pod \"perses-operator-5446b9c989-9f8vw\" (UID: \"b0a308b8-6586-4d48-b431-ce0c6f46a23e\") " pod="openshift-operators/perses-operator-5446b9c989-9f8vw" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.483321 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.532134 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g699z\" (UniqueName: \"kubernetes.io/projected/b0a308b8-6586-4d48-b431-ce0c6f46a23e-kube-api-access-g699z\") pod \"perses-operator-5446b9c989-9f8vw\" (UID: \"b0a308b8-6586-4d48-b431-ce0c6f46a23e\") " pod="openshift-operators/perses-operator-5446b9c989-9f8vw" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.532209 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/b0a308b8-6586-4d48-b431-ce0c6f46a23e-openshift-service-ca\") pod \"perses-operator-5446b9c989-9f8vw\" (UID: \"b0a308b8-6586-4d48-b431-ce0c6f46a23e\") " pod="openshift-operators/perses-operator-5446b9c989-9f8vw" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.533109 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/b0a308b8-6586-4d48-b431-ce0c6f46a23e-openshift-service-ca\") pod \"perses-operator-5446b9c989-9f8vw\" (UID: \"b0a308b8-6586-4d48-b431-ce0c6f46a23e\") " pod="openshift-operators/perses-operator-5446b9c989-9f8vw" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.551193 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g699z\" (UniqueName: \"kubernetes.io/projected/b0a308b8-6586-4d48-b431-ce0c6f46a23e-kube-api-access-g699z\") pod \"perses-operator-5446b9c989-9f8vw\" (UID: \"b0a308b8-6586-4d48-b431-ce0c6f46a23e\") " pod="openshift-operators/perses-operator-5446b9c989-9f8vw" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.620159 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-9f8vw" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.741833 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-l8rk7"] Nov 26 05:36:12 crc kubenswrapper[4871]: W1126 05:36:12.768200 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e5f535a_ead4_47e3_a477_20cf74b0828a.slice/crio-c45ddb59e74ee41051bc233a65ac164b58bfdc11e91cbf12f59173dfd9f8bee2 WatchSource:0}: Error finding container c45ddb59e74ee41051bc233a65ac164b58bfdc11e91cbf12f59173dfd9f8bee2: Status 404 returned error can't find the container with id c45ddb59e74ee41051bc233a65ac164b58bfdc11e91cbf12f59173dfd9f8bee2 Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.842953 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-9f8vw"] Nov 26 05:36:12 crc kubenswrapper[4871]: W1126 05:36:12.849790 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0a308b8_6586_4d48_b431_ce0c6f46a23e.slice/crio-45c02e352a5d510c8a72c40f85c090374c4d143963230453a6f30dc6f672d243 WatchSource:0}: Error finding container 45c02e352a5d510c8a72c40f85c090374c4d143963230453a6f30dc6f672d243: Status 404 returned error can't find the container with id 45c02e352a5d510c8a72c40f85c090374c4d143963230453a6f30dc6f672d243 Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.904563 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.913026 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/995a4906-508d-4285-b40c-5b14fd9d7b98-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj\" (UID: \"995a4906-508d-4285-b40c-5b14fd9d7b98\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.913063 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3cd1c1e8-5430-4209-a0e2-3176d0ebb70a-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5\" (UID: \"3cd1c1e8-5430-4209-a0e2-3176d0ebb70a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.913140 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/995a4906-508d-4285-b40c-5b14fd9d7b98-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj\" (UID: \"995a4906-508d-4285-b40c-5b14fd9d7b98\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj" Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.918013 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3cd1c1e8-5430-4209-a0e2-3176d0ebb70a-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5\" (UID: \"3cd1c1e8-5430-4209-a0e2-3176d0ebb70a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5" Nov 26 05:36:12 crc 
kubenswrapper[4871]: I1126 05:36:12.949010 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" event={"ID":"2e5f535a-ead4-47e3-a477-20cf74b0828a","Type":"ContainerStarted","Data":"c45ddb59e74ee41051bc233a65ac164b58bfdc11e91cbf12f59173dfd9f8bee2"} Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.949914 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-9f8vw" event={"ID":"b0a308b8-6586-4d48-b431-ce0c6f46a23e","Type":"ContainerStarted","Data":"45c02e352a5d510c8a72c40f85c090374c4d143963230453a6f30dc6f672d243"} Nov 26 05:36:12 crc kubenswrapper[4871]: I1126 05:36:12.951099 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-bmfw7" event={"ID":"b9220e8d-267e-4462-b6ea-094a0f724eb3","Type":"ContainerStarted","Data":"bdd0a0421fbad097920b6b140285209e0ff45763756ff5ab495788fe37f5b9ab"} Nov 26 05:36:13 crc kubenswrapper[4871]: I1126 05:36:13.493032 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-ds5gp" Nov 26 05:36:13 crc kubenswrapper[4871]: I1126 05:36:13.496580 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5" Nov 26 05:36:13 crc kubenswrapper[4871]: I1126 05:36:13.496644 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj" Nov 26 05:36:13 crc kubenswrapper[4871]: I1126 05:36:13.754401 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5"] Nov 26 05:36:13 crc kubenswrapper[4871]: W1126 05:36:13.766876 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3cd1c1e8_5430_4209_a0e2_3176d0ebb70a.slice/crio-246cda0c1bb45d8593ad826c3b7a399b74243a19425d0895f3cf30c52b9b35b3 WatchSource:0}: Error finding container 246cda0c1bb45d8593ad826c3b7a399b74243a19425d0895f3cf30c52b9b35b3: Status 404 returned error can't find the container with id 246cda0c1bb45d8593ad826c3b7a399b74243a19425d0895f3cf30c52b9b35b3 Nov 26 05:36:13 crc kubenswrapper[4871]: I1126 05:36:13.859800 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj"] Nov 26 05:36:13 crc kubenswrapper[4871]: W1126 05:36:13.870742 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod995a4906_508d_4285_b40c_5b14fd9d7b98.slice/crio-ad6a6f661ddd4c8f00d0aa808e5a7351e3071acd57e63b24f84e4d9bdcc268e1 WatchSource:0}: Error finding container ad6a6f661ddd4c8f00d0aa808e5a7351e3071acd57e63b24f84e4d9bdcc268e1: Status 404 returned error can't find the container with id ad6a6f661ddd4c8f00d0aa808e5a7351e3071acd57e63b24f84e4d9bdcc268e1 Nov 26 05:36:13 crc kubenswrapper[4871]: I1126 05:36:13.972331 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5" event={"ID":"3cd1c1e8-5430-4209-a0e2-3176d0ebb70a","Type":"ContainerStarted","Data":"246cda0c1bb45d8593ad826c3b7a399b74243a19425d0895f3cf30c52b9b35b3"} Nov 26 05:36:13 crc kubenswrapper[4871]: I1126 05:36:13.982503 4871 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj" event={"ID":"995a4906-508d-4285-b40c-5b14fd9d7b98","Type":"ContainerStarted","Data":"ad6a6f661ddd4c8f00d0aa808e5a7351e3071acd57e63b24f84e4d9bdcc268e1"} Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.130012 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj" event={"ID":"995a4906-508d-4285-b40c-5b14fd9d7b98","Type":"ContainerStarted","Data":"0870438d007cd1861f0f1d225fa6add8f57318660051758cc09af9e62fca14cc"} Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.134434 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-9f8vw" event={"ID":"b0a308b8-6586-4d48-b431-ce0c6f46a23e","Type":"ContainerStarted","Data":"2153b2f58c7dd02e5a64a65e470539ff16cacd807db41d198ba6acdc68dda9ae"} Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.135194 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-9f8vw" Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.140470 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-bmfw7" event={"ID":"b9220e8d-267e-4462-b6ea-094a0f724eb3","Type":"ContainerStarted","Data":"bca4fb2b0ce6b232d9dee31379594f6fc6afba3ddf2b25f0d4127e2bdcb71b46"} Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.142678 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5" event={"ID":"3cd1c1e8-5430-4209-a0e2-3176d0ebb70a","Type":"ContainerStarted","Data":"3ec5c865b9d513348166fb9f80f1a48698dcb51a83878acf3466076d0a459c20"} Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.145332 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" event={"ID":"2e5f535a-ead4-47e3-a477-20cf74b0828a","Type":"ContainerStarted","Data":"cdd72882a31dd3aac91bec163edbf38d10529f6080c2e6ec415a781c2d199001"} Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.145597 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.156718 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.181847 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj" podStartSLOduration=3.7688783729999997 podStartE2EDuration="17.181826913s" podCreationTimestamp="2025-11-26 05:36:11 +0000 UTC" firstStartedPulling="2025-11-26 05:36:13.873821281 +0000 UTC m=+632.056872867" lastFinishedPulling="2025-11-26 05:36:27.286769821 +0000 UTC m=+645.469821407" observedRunningTime="2025-11-26 05:36:28.157331197 +0000 UTC m=+646.340382783" watchObservedRunningTime="2025-11-26 05:36:28.181826913 +0000 UTC m=+646.364878499" Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.183666 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-9f8vw" podStartSLOduration=1.752315955 podStartE2EDuration="16.183655139s" podCreationTimestamp="2025-11-26 
05:36:12 +0000 UTC" firstStartedPulling="2025-11-26 05:36:12.857982331 +0000 UTC m=+631.041033917" lastFinishedPulling="2025-11-26 05:36:27.289321515 +0000 UTC m=+645.472373101" observedRunningTime="2025-11-26 05:36:28.179461974 +0000 UTC m=+646.362513560" watchObservedRunningTime="2025-11-26 05:36:28.183655139 +0000 UTC m=+646.366706725" Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.214816 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5" podStartSLOduration=3.703756044 podStartE2EDuration="17.214797351s" podCreationTimestamp="2025-11-26 05:36:11 +0000 UTC" firstStartedPulling="2025-11-26 05:36:13.769853646 +0000 UTC m=+631.952905232" lastFinishedPulling="2025-11-26 05:36:27.280894953 +0000 UTC m=+645.463946539" observedRunningTime="2025-11-26 05:36:28.210647307 +0000 UTC m=+646.393698893" watchObservedRunningTime="2025-11-26 05:36:28.214797351 +0000 UTC m=+646.397848937" Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.233142 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-bmfw7" podStartSLOduration=2.384518263 podStartE2EDuration="17.233119002s" podCreationTimestamp="2025-11-26 05:36:11 +0000 UTC" firstStartedPulling="2025-11-26 05:36:12.433768111 +0000 UTC m=+630.616819697" lastFinishedPulling="2025-11-26 05:36:27.28236885 +0000 UTC m=+645.465420436" observedRunningTime="2025-11-26 05:36:28.23025522 +0000 UTC m=+646.413306816" watchObservedRunningTime="2025-11-26 05:36:28.233119002 +0000 UTC m=+646.416170588" Nov 26 05:36:28 crc kubenswrapper[4871]: I1126 05:36:28.266855 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-l8rk7" podStartSLOduration=1.755137355 podStartE2EDuration="16.2668377s" podCreationTimestamp="2025-11-26 05:36:12 +0000 UTC" firstStartedPulling="2025-11-26 05:36:12.769910076 +0000 UTC m=+630.952961662" lastFinishedPulling="2025-11-26 05:36:27.281610421 +0000 UTC m=+645.464662007" observedRunningTime="2025-11-26 05:36:28.26562158 +0000 UTC m=+646.448673186" watchObservedRunningTime="2025-11-26 05:36:28.2668377 +0000 UTC m=+646.449889296" Nov 26 05:36:32 crc kubenswrapper[4871]: I1126 05:36:32.623052 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-9f8vw" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.067250 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr"] Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.069456 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.075071 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.098763 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr"] Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.192164 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.192320 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-549xt\" (UniqueName: \"kubernetes.io/projected/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-kube-api-access-549xt\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.192451 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.293467 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.293517 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-549xt\" (UniqueName: \"kubernetes.io/projected/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-kube-api-access-549xt\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.293583 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.294108 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.294133 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.323509 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-549xt\" (UniqueName: \"kubernetes.io/projected/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-kube-api-access-549xt\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.403909 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:50 crc kubenswrapper[4871]: I1126 05:36:50.893235 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr"] Nov 26 05:36:51 crc kubenswrapper[4871]: E1126 05:36:51.240659 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8b55d68_fcd3_43c4_94fe_344ed7cdb002.slice/crio-conmon-c78c80d1fa8957180e91bc32a49cc79257e1d3b6070183f7f1d7bed4ceae9603.scope\": RecentStats: unable to find data in memory cache]" Nov 26 05:36:51 crc kubenswrapper[4871]: I1126 05:36:51.278755 4871 generic.go:334] "Generic (PLEG): container finished" podID="b8b55d68-fcd3-43c4-94fe-344ed7cdb002" containerID="c78c80d1fa8957180e91bc32a49cc79257e1d3b6070183f7f1d7bed4ceae9603" exitCode=0 Nov 26 05:36:51 crc kubenswrapper[4871]: I1126 05:36:51.278800 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" event={"ID":"b8b55d68-fcd3-43c4-94fe-344ed7cdb002","Type":"ContainerDied","Data":"c78c80d1fa8957180e91bc32a49cc79257e1d3b6070183f7f1d7bed4ceae9603"} Nov 26 05:36:51 crc kubenswrapper[4871]: I1126 05:36:51.278826 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" event={"ID":"b8b55d68-fcd3-43c4-94fe-344ed7cdb002","Type":"ContainerStarted","Data":"721e61ae747e78fd3e107ebc113e374b1821a200530e9f03a0a2969b77faf7c3"} Nov 26 05:36:53 crc kubenswrapper[4871]: I1126 05:36:53.293894 4871 generic.go:334] "Generic (PLEG): container finished" podID="b8b55d68-fcd3-43c4-94fe-344ed7cdb002" containerID="f990ad21a956c56790801e2f7f51a7433340554827b147eadc580062ad70cf08" exitCode=0 Nov 26 05:36:53 crc kubenswrapper[4871]: I1126 05:36:53.293949 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" 
event={"ID":"b8b55d68-fcd3-43c4-94fe-344ed7cdb002","Type":"ContainerDied","Data":"f990ad21a956c56790801e2f7f51a7433340554827b147eadc580062ad70cf08"} Nov 26 05:36:56 crc kubenswrapper[4871]: I1126 05:36:56.324623 4871 generic.go:334] "Generic (PLEG): container finished" podID="b8b55d68-fcd3-43c4-94fe-344ed7cdb002" containerID="1b503ba23110650fb9a9fc00bffa6e4e7cd4c79ca8bf76642abb5ab54039ab09" exitCode=0 Nov 26 05:36:56 crc kubenswrapper[4871]: I1126 05:36:56.325054 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" event={"ID":"b8b55d68-fcd3-43c4-94fe-344ed7cdb002","Type":"ContainerDied","Data":"1b503ba23110650fb9a9fc00bffa6e4e7cd4c79ca8bf76642abb5ab54039ab09"} Nov 26 05:36:57 crc kubenswrapper[4871]: I1126 05:36:57.657122 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:36:57 crc kubenswrapper[4871]: I1126 05:36:57.729939 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-549xt\" (UniqueName: \"kubernetes.io/projected/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-kube-api-access-549xt\") pod \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " Nov 26 05:36:57 crc kubenswrapper[4871]: I1126 05:36:57.730021 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-util\") pod \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " Nov 26 05:36:57 crc kubenswrapper[4871]: I1126 05:36:57.730100 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-bundle\") pod \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\" (UID: \"b8b55d68-fcd3-43c4-94fe-344ed7cdb002\") " Nov 26 05:36:57 crc kubenswrapper[4871]: I1126 05:36:57.730811 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-bundle" (OuterVolumeSpecName: "bundle") pod "b8b55d68-fcd3-43c4-94fe-344ed7cdb002" (UID: "b8b55d68-fcd3-43c4-94fe-344ed7cdb002"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:36:57 crc kubenswrapper[4871]: I1126 05:36:57.735760 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-kube-api-access-549xt" (OuterVolumeSpecName: "kube-api-access-549xt") pod "b8b55d68-fcd3-43c4-94fe-344ed7cdb002" (UID: "b8b55d68-fcd3-43c4-94fe-344ed7cdb002"). InnerVolumeSpecName "kube-api-access-549xt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:36:57 crc kubenswrapper[4871]: I1126 05:36:57.746313 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-util" (OuterVolumeSpecName: "util") pod "b8b55d68-fcd3-43c4-94fe-344ed7cdb002" (UID: "b8b55d68-fcd3-43c4-94fe-344ed7cdb002"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:36:57 crc kubenswrapper[4871]: I1126 05:36:57.831752 4871 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:36:57 crc kubenswrapper[4871]: I1126 05:36:57.832162 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-549xt\" (UniqueName: \"kubernetes.io/projected/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-kube-api-access-549xt\") on node \"crc\" DevicePath \"\"" Nov 26 05:36:57 crc kubenswrapper[4871]: I1126 05:36:57.832179 4871 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b8b55d68-fcd3-43c4-94fe-344ed7cdb002-util\") on node \"crc\" DevicePath \"\"" Nov 26 05:36:58 crc kubenswrapper[4871]: I1126 05:36:58.341674 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" event={"ID":"b8b55d68-fcd3-43c4-94fe-344ed7cdb002","Type":"ContainerDied","Data":"721e61ae747e78fd3e107ebc113e374b1821a200530e9f03a0a2969b77faf7c3"} Nov 26 05:36:58 crc kubenswrapper[4871]: I1126 05:36:58.341747 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="721e61ae747e78fd3e107ebc113e374b1821a200530e9f03a0a2969b77faf7c3" Nov 26 05:36:58 crc kubenswrapper[4871]: I1126 05:36:58.341835 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.614008 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-xm9jn"] Nov 26 05:37:01 crc kubenswrapper[4871]: E1126 05:37:01.614483 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8b55d68-fcd3-43c4-94fe-344ed7cdb002" containerName="util" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.614494 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8b55d68-fcd3-43c4-94fe-344ed7cdb002" containerName="util" Nov 26 05:37:01 crc kubenswrapper[4871]: E1126 05:37:01.614505 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8b55d68-fcd3-43c4-94fe-344ed7cdb002" containerName="extract" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.614511 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8b55d68-fcd3-43c4-94fe-344ed7cdb002" containerName="extract" Nov 26 05:37:01 crc kubenswrapper[4871]: E1126 05:37:01.614536 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8b55d68-fcd3-43c4-94fe-344ed7cdb002" containerName="pull" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.614542 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8b55d68-fcd3-43c4-94fe-344ed7cdb002" containerName="pull" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.614638 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8b55d68-fcd3-43c4-94fe-344ed7cdb002" containerName="extract" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.614985 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-xm9jn" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.616828 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.616952 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-wnwhg" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.617270 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.626634 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-xm9jn"] Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.787307 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch8ll\" (UniqueName: \"kubernetes.io/projected/4b9d5c2d-8d95-4b86-86e4-6e425a8c6814-kube-api-access-ch8ll\") pod \"nmstate-operator-557fdffb88-xm9jn\" (UID: \"4b9d5c2d-8d95-4b86-86e4-6e425a8c6814\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-xm9jn" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.888657 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch8ll\" (UniqueName: \"kubernetes.io/projected/4b9d5c2d-8d95-4b86-86e4-6e425a8c6814-kube-api-access-ch8ll\") pod \"nmstate-operator-557fdffb88-xm9jn\" (UID: \"4b9d5c2d-8d95-4b86-86e4-6e425a8c6814\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-xm9jn" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.922988 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch8ll\" (UniqueName: \"kubernetes.io/projected/4b9d5c2d-8d95-4b86-86e4-6e425a8c6814-kube-api-access-ch8ll\") pod \"nmstate-operator-557fdffb88-xm9jn\" (UID: \"4b9d5c2d-8d95-4b86-86e4-6e425a8c6814\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-xm9jn" Nov 26 05:37:01 crc kubenswrapper[4871]: I1126 05:37:01.930775 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-xm9jn" Nov 26 05:37:02 crc kubenswrapper[4871]: I1126 05:37:02.179371 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-xm9jn"] Nov 26 05:37:02 crc kubenswrapper[4871]: I1126 05:37:02.365511 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-xm9jn" event={"ID":"4b9d5c2d-8d95-4b86-86e4-6e425a8c6814","Type":"ContainerStarted","Data":"9e08f304da3dffdec2d0bb82f8870fe4ed3e3fb8e53614a769ebbffff7677009"} Nov 26 05:37:05 crc kubenswrapper[4871]: I1126 05:37:05.388264 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-xm9jn" event={"ID":"4b9d5c2d-8d95-4b86-86e4-6e425a8c6814","Type":"ContainerStarted","Data":"505c5b04c52fcc7002576aa3e843388098f3a47c0ca712e6e5aec73103bf0a8c"} Nov 26 05:37:05 crc kubenswrapper[4871]: I1126 05:37:05.418175 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-xm9jn" podStartSLOduration=2.332769953 podStartE2EDuration="4.418145524s" podCreationTimestamp="2025-11-26 05:37:01 +0000 UTC" firstStartedPulling="2025-11-26 05:37:02.197381713 +0000 UTC m=+680.380433309" lastFinishedPulling="2025-11-26 05:37:04.282757284 +0000 UTC m=+682.465808880" observedRunningTime="2025-11-26 05:37:05.413253961 +0000 UTC m=+683.596305587" watchObservedRunningTime="2025-11-26 05:37:05.418145524 +0000 UTC m=+683.601197140" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.254179 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv"] Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.255855 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.258121 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-6xdcj" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.272229 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76"] Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.272913 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.274494 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.321493 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv"] Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.324559 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76"] Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.328381 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb6w9\" (UniqueName: \"kubernetes.io/projected/d6cb9226-08bd-44d7-97b7-ac75848ef5bd-kube-api-access-qb6w9\") pod \"nmstate-metrics-5dcf9c57c5-z92tv\" (UID: \"d6cb9226-08bd-44d7-97b7-ac75848ef5bd\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.328582 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/85b6422a-f943-4ced-8695-3d7f52f5f145-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-ngv76\" (UID: \"85b6422a-f943-4ced-8695-3d7f52f5f145\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.328616 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lq5g7\" (UniqueName: \"kubernetes.io/projected/85b6422a-f943-4ced-8695-3d7f52f5f145-kube-api-access-lq5g7\") pod \"nmstate-webhook-6b89b748d8-ngv76\" (UID: \"85b6422a-f943-4ced-8695-3d7f52f5f145\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.339585 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-w8pzw"] Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.340231 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.407380 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h"] Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.408316 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.411651 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.412012 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.412113 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-8fhbp" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.429102 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h"] Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.429209 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-588nw\" (UniqueName: \"kubernetes.io/projected/f24be1df-be1a-4389-a3d5-7842b91f18b4-kube-api-access-588nw\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.429301 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f24be1df-be1a-4389-a3d5-7842b91f18b4-ovs-socket\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.429376 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bacf9337-da95-4df5-9f49-a9e6c46ac060-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-kgf8h\" (UID: \"bacf9337-da95-4df5-9f49-a9e6c46ac060\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.429462 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb6w9\" (UniqueName: \"kubernetes.io/projected/d6cb9226-08bd-44d7-97b7-ac75848ef5bd-kube-api-access-qb6w9\") pod \"nmstate-metrics-5dcf9c57c5-z92tv\" (UID: \"d6cb9226-08bd-44d7-97b7-ac75848ef5bd\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.429557 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f24be1df-be1a-4389-a3d5-7842b91f18b4-nmstate-lock\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.429656 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f24be1df-be1a-4389-a3d5-7842b91f18b4-dbus-socket\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.429747 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/bacf9337-da95-4df5-9f49-a9e6c46ac060-nginx-conf\") pod 
\"nmstate-console-plugin-5874bd7bc5-kgf8h\" (UID: \"bacf9337-da95-4df5-9f49-a9e6c46ac060\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.429825 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/85b6422a-f943-4ced-8695-3d7f52f5f145-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-ngv76\" (UID: \"85b6422a-f943-4ced-8695-3d7f52f5f145\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.429890 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lq5g7\" (UniqueName: \"kubernetes.io/projected/85b6422a-f943-4ced-8695-3d7f52f5f145-kube-api-access-lq5g7\") pod \"nmstate-webhook-6b89b748d8-ngv76\" (UID: \"85b6422a-f943-4ced-8695-3d7f52f5f145\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.429975 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9x7s\" (UniqueName: \"kubernetes.io/projected/bacf9337-da95-4df5-9f49-a9e6c46ac060-kube-api-access-c9x7s\") pod \"nmstate-console-plugin-5874bd7bc5-kgf8h\" (UID: \"bacf9337-da95-4df5-9f49-a9e6c46ac060\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.443634 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/85b6422a-f943-4ced-8695-3d7f52f5f145-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-ngv76\" (UID: \"85b6422a-f943-4ced-8695-3d7f52f5f145\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.450629 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb6w9\" (UniqueName: \"kubernetes.io/projected/d6cb9226-08bd-44d7-97b7-ac75848ef5bd-kube-api-access-qb6w9\") pod \"nmstate-metrics-5dcf9c57c5-z92tv\" (UID: \"d6cb9226-08bd-44d7-97b7-ac75848ef5bd\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.453702 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lq5g7\" (UniqueName: \"kubernetes.io/projected/85b6422a-f943-4ced-8695-3d7f52f5f145-kube-api-access-lq5g7\") pod \"nmstate-webhook-6b89b748d8-ngv76\" (UID: \"85b6422a-f943-4ced-8695-3d7f52f5f145\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.530728 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9x7s\" (UniqueName: \"kubernetes.io/projected/bacf9337-da95-4df5-9f49-a9e6c46ac060-kube-api-access-c9x7s\") pod \"nmstate-console-plugin-5874bd7bc5-kgf8h\" (UID: \"bacf9337-da95-4df5-9f49-a9e6c46ac060\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.530790 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-588nw\" (UniqueName: \"kubernetes.io/projected/f24be1df-be1a-4389-a3d5-7842b91f18b4-kube-api-access-588nw\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.530821 4871 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f24be1df-be1a-4389-a3d5-7842b91f18b4-ovs-socket\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.530843 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bacf9337-da95-4df5-9f49-a9e6c46ac060-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-kgf8h\" (UID: \"bacf9337-da95-4df5-9f49-a9e6c46ac060\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.530884 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f24be1df-be1a-4389-a3d5-7842b91f18b4-nmstate-lock\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.530927 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f24be1df-be1a-4389-a3d5-7842b91f18b4-dbus-socket\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.530961 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/bacf9337-da95-4df5-9f49-a9e6c46ac060-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-kgf8h\" (UID: \"bacf9337-da95-4df5-9f49-a9e6c46ac060\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.531915 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/bacf9337-da95-4df5-9f49-a9e6c46ac060-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-kgf8h\" (UID: \"bacf9337-da95-4df5-9f49-a9e6c46ac060\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.532792 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/f24be1df-be1a-4389-a3d5-7842b91f18b4-ovs-socket\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: E1126 05:37:11.532867 4871 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 26 05:37:11 crc kubenswrapper[4871]: E1126 05:37:11.532911 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bacf9337-da95-4df5-9f49-a9e6c46ac060-plugin-serving-cert podName:bacf9337-da95-4df5-9f49-a9e6c46ac060 nodeName:}" failed. No retries permitted until 2025-11-26 05:37:12.03289485 +0000 UTC m=+690.215946436 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/bacf9337-da95-4df5-9f49-a9e6c46ac060-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-kgf8h" (UID: "bacf9337-da95-4df5-9f49-a9e6c46ac060") : secret "plugin-serving-cert" not found Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.533271 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/f24be1df-be1a-4389-a3d5-7842b91f18b4-nmstate-lock\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.533777 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/f24be1df-be1a-4389-a3d5-7842b91f18b4-dbus-socket\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.559909 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9x7s\" (UniqueName: \"kubernetes.io/projected/bacf9337-da95-4df5-9f49-a9e6c46ac060-kube-api-access-c9x7s\") pod \"nmstate-console-plugin-5874bd7bc5-kgf8h\" (UID: \"bacf9337-da95-4df5-9f49-a9e6c46ac060\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.565363 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-588nw\" (UniqueName: \"kubernetes.io/projected/f24be1df-be1a-4389-a3d5-7842b91f18b4-kube-api-access-588nw\") pod \"nmstate-handler-w8pzw\" (UID: \"f24be1df-be1a-4389-a3d5-7842b91f18b4\") " pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.574194 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.616248 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-797655d6d8-grllv"] Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.618845 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.619907 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.642360 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-service-ca\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.642779 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-trusted-ca-bundle\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.642835 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njdx2\" (UniqueName: \"kubernetes.io/projected/b4858b80-880e-4924-ab8e-1b1d9360e00d-kube-api-access-njdx2\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.643078 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-oauth-serving-cert\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.643172 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b4858b80-880e-4924-ab8e-1b1d9360e00d-console-oauth-config\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.643207 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b4858b80-880e-4924-ab8e-1b1d9360e00d-console-serving-cert\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.643247 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-console-config\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.647806 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-797655d6d8-grllv"] Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.654699 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.748569 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-trusted-ca-bundle\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.748637 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njdx2\" (UniqueName: \"kubernetes.io/projected/b4858b80-880e-4924-ab8e-1b1d9360e00d-kube-api-access-njdx2\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.748668 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-oauth-serving-cert\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.748733 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b4858b80-880e-4924-ab8e-1b1d9360e00d-console-oauth-config\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.748758 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b4858b80-880e-4924-ab8e-1b1d9360e00d-console-serving-cert\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.748798 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-console-config\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.748894 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-service-ca\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.750817 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-service-ca\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.751643 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-trusted-ca-bundle\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " 
pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.751963 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-console-config\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.752168 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b4858b80-880e-4924-ab8e-1b1d9360e00d-oauth-serving-cert\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.766018 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b4858b80-880e-4924-ab8e-1b1d9360e00d-console-serving-cert\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.766018 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b4858b80-880e-4924-ab8e-1b1d9360e00d-console-oauth-config\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.768408 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njdx2\" (UniqueName: \"kubernetes.io/projected/b4858b80-880e-4924-ab8e-1b1d9360e00d-kube-api-access-njdx2\") pod \"console-797655d6d8-grllv\" (UID: \"b4858b80-880e-4924-ab8e-1b1d9360e00d\") " pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.840114 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv"] Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.880044 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76"] Nov 26 05:37:11 crc kubenswrapper[4871]: W1126 05:37:11.886686 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85b6422a_f943_4ced_8695_3d7f52f5f145.slice/crio-7aafa629bd163a01aa861b79fd447f525f3d7154f5e857474d1388ba92e31353 WatchSource:0}: Error finding container 7aafa629bd163a01aa861b79fd447f525f3d7154f5e857474d1388ba92e31353: Status 404 returned error can't find the container with id 7aafa629bd163a01aa861b79fd447f525f3d7154f5e857474d1388ba92e31353 Nov 26 05:37:11 crc kubenswrapper[4871]: I1126 05:37:11.947622 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:12 crc kubenswrapper[4871]: I1126 05:37:12.052297 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bacf9337-da95-4df5-9f49-a9e6c46ac060-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-kgf8h\" (UID: \"bacf9337-da95-4df5-9f49-a9e6c46ac060\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:12 crc kubenswrapper[4871]: I1126 05:37:12.057999 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bacf9337-da95-4df5-9f49-a9e6c46ac060-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-kgf8h\" (UID: \"bacf9337-da95-4df5-9f49-a9e6c46ac060\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:12 crc kubenswrapper[4871]: I1126 05:37:12.143698 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-797655d6d8-grllv"] Nov 26 05:37:12 crc kubenswrapper[4871]: W1126 05:37:12.146943 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4858b80_880e_4924_ab8e_1b1d9360e00d.slice/crio-e81420bd6045afa4001c7ff9a7e4881098ac75b2795179910094f77ae15a01b5 WatchSource:0}: Error finding container e81420bd6045afa4001c7ff9a7e4881098ac75b2795179910094f77ae15a01b5: Status 404 returned error can't find the container with id e81420bd6045afa4001c7ff9a7e4881098ac75b2795179910094f77ae15a01b5 Nov 26 05:37:12 crc kubenswrapper[4871]: I1126 05:37:12.322790 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" Nov 26 05:37:12 crc kubenswrapper[4871]: I1126 05:37:12.491035 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-797655d6d8-grllv" event={"ID":"b4858b80-880e-4924-ab8e-1b1d9360e00d","Type":"ContainerStarted","Data":"1b21a1bf1a97b0c843aa30b508039dde572e620d6da3b51b881b7dbb1fc9a632"} Nov 26 05:37:12 crc kubenswrapper[4871]: I1126 05:37:12.491340 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-797655d6d8-grllv" event={"ID":"b4858b80-880e-4924-ab8e-1b1d9360e00d","Type":"ContainerStarted","Data":"e81420bd6045afa4001c7ff9a7e4881098ac75b2795179910094f77ae15a01b5"} Nov 26 05:37:12 crc kubenswrapper[4871]: I1126 05:37:12.492107 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-w8pzw" event={"ID":"f24be1df-be1a-4389-a3d5-7842b91f18b4","Type":"ContainerStarted","Data":"3b8a7f92f7af1a099fcecccc0e54afe1158ca78457022fef3865ba539de5e279"} Nov 26 05:37:12 crc kubenswrapper[4871]: I1126 05:37:12.493182 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv" event={"ID":"d6cb9226-08bd-44d7-97b7-ac75848ef5bd","Type":"ContainerStarted","Data":"55776920251f555b1d58f0eb9ce5e533826f843b551d94199ccf58c73c599109"} Nov 26 05:37:12 crc kubenswrapper[4871]: I1126 05:37:12.494059 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" event={"ID":"85b6422a-f943-4ced-8695-3d7f52f5f145","Type":"ContainerStarted","Data":"7aafa629bd163a01aa861b79fd447f525f3d7154f5e857474d1388ba92e31353"} Nov 26 05:37:12 crc kubenswrapper[4871]: I1126 05:37:12.520196 4871 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-console/console-797655d6d8-grllv" podStartSLOduration=1.520181333 podStartE2EDuration="1.520181333s" podCreationTimestamp="2025-11-26 05:37:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:37:12.509683928 +0000 UTC m=+690.692735524" watchObservedRunningTime="2025-11-26 05:37:12.520181333 +0000 UTC m=+690.703232909" Nov 26 05:37:12 crc kubenswrapper[4871]: I1126 05:37:12.565754 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h"] Nov 26 05:37:12 crc kubenswrapper[4871]: W1126 05:37:12.574758 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbacf9337_da95_4df5_9f49_a9e6c46ac060.slice/crio-f8621008828bfcbf4f9ef59c9b87073d3f2b4e7fd36e4069cc210843bd392ec5 WatchSource:0}: Error finding container f8621008828bfcbf4f9ef59c9b87073d3f2b4e7fd36e4069cc210843bd392ec5: Status 404 returned error can't find the container with id f8621008828bfcbf4f9ef59c9b87073d3f2b4e7fd36e4069cc210843bd392ec5 Nov 26 05:37:13 crc kubenswrapper[4871]: I1126 05:37:13.500516 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" event={"ID":"bacf9337-da95-4df5-9f49-a9e6c46ac060","Type":"ContainerStarted","Data":"f8621008828bfcbf4f9ef59c9b87073d3f2b4e7fd36e4069cc210843bd392ec5"} Nov 26 05:37:14 crc kubenswrapper[4871]: I1126 05:37:14.515807 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv" event={"ID":"d6cb9226-08bd-44d7-97b7-ac75848ef5bd","Type":"ContainerStarted","Data":"ec251d55a0f600eb44b5a68dd38f51ea60d1460107439f9bb5a364036927a94f"} Nov 26 05:37:14 crc kubenswrapper[4871]: I1126 05:37:14.516035 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" event={"ID":"85b6422a-f943-4ced-8695-3d7f52f5f145","Type":"ContainerStarted","Data":"5cf19321e05fe72468a63314312dd649041403d1fce45eab225e14b72af3fb40"} Nov 26 05:37:14 crc kubenswrapper[4871]: I1126 05:37:14.516056 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" Nov 26 05:37:14 crc kubenswrapper[4871]: I1126 05:37:14.516065 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-w8pzw" event={"ID":"f24be1df-be1a-4389-a3d5-7842b91f18b4","Type":"ContainerStarted","Data":"9f932f8ac0aa26f956c67a5266b30c66f9f65076c96eb7aebb1100dc46e076b2"} Nov 26 05:37:14 crc kubenswrapper[4871]: I1126 05:37:14.516076 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:14 crc kubenswrapper[4871]: I1126 05:37:14.525360 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" podStartSLOduration=1.169573436 podStartE2EDuration="3.525345461s" podCreationTimestamp="2025-11-26 05:37:11 +0000 UTC" firstStartedPulling="2025-11-26 05:37:11.888803971 +0000 UTC m=+690.071855557" lastFinishedPulling="2025-11-26 05:37:14.244575956 +0000 UTC m=+692.427627582" observedRunningTime="2025-11-26 05:37:14.523559136 +0000 UTC m=+692.706610712" watchObservedRunningTime="2025-11-26 05:37:14.525345461 +0000 UTC m=+692.708397047" Nov 26 05:37:14 crc kubenswrapper[4871]: I1126 05:37:14.540121 4871 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-w8pzw" podStartSLOduration=0.998837108 podStartE2EDuration="3.540106624s" podCreationTimestamp="2025-11-26 05:37:11 +0000 UTC" firstStartedPulling="2025-11-26 05:37:11.696599121 +0000 UTC m=+689.879650707" lastFinishedPulling="2025-11-26 05:37:14.237868637 +0000 UTC m=+692.420920223" observedRunningTime="2025-11-26 05:37:14.539254612 +0000 UTC m=+692.722306198" watchObservedRunningTime="2025-11-26 05:37:14.540106624 +0000 UTC m=+692.723158210" Nov 26 05:37:16 crc kubenswrapper[4871]: I1126 05:37:16.528850 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" event={"ID":"bacf9337-da95-4df5-9f49-a9e6c46ac060","Type":"ContainerStarted","Data":"e15b4baba66da34fa38beee9db0b84f45400f14221f9d01a23bd7074218efd8f"} Nov 26 05:37:16 crc kubenswrapper[4871]: I1126 05:37:16.545001 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-kgf8h" podStartSLOduration=2.696323501 podStartE2EDuration="5.544974293s" podCreationTimestamp="2025-11-26 05:37:11 +0000 UTC" firstStartedPulling="2025-11-26 05:37:12.576375171 +0000 UTC m=+690.759426767" lastFinishedPulling="2025-11-26 05:37:15.425025973 +0000 UTC m=+693.608077559" observedRunningTime="2025-11-26 05:37:16.544271936 +0000 UTC m=+694.727323522" watchObservedRunningTime="2025-11-26 05:37:16.544974293 +0000 UTC m=+694.728025919" Nov 26 05:37:18 crc kubenswrapper[4871]: I1126 05:37:18.545151 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv" event={"ID":"d6cb9226-08bd-44d7-97b7-ac75848ef5bd","Type":"ContainerStarted","Data":"2643ab697ea72ec6a3db57391fdc8fe06ca44a35c5b8632e2d345beb0d431d32"} Nov 26 05:37:18 crc kubenswrapper[4871]: I1126 05:37:18.567924 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-z92tv" podStartSLOduration=1.9749028960000001 podStartE2EDuration="7.567898528s" podCreationTimestamp="2025-11-26 05:37:11 +0000 UTC" firstStartedPulling="2025-11-26 05:37:11.850013422 +0000 UTC m=+690.033065008" lastFinishedPulling="2025-11-26 05:37:17.443009044 +0000 UTC m=+695.626060640" observedRunningTime="2025-11-26 05:37:18.567202391 +0000 UTC m=+696.750254007" watchObservedRunningTime="2025-11-26 05:37:18.567898528 +0000 UTC m=+696.750950154" Nov 26 05:37:21 crc kubenswrapper[4871]: I1126 05:37:21.700024 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-w8pzw" Nov 26 05:37:21 crc kubenswrapper[4871]: I1126 05:37:21.947792 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:21 crc kubenswrapper[4871]: I1126 05:37:21.947864 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:21 crc kubenswrapper[4871]: I1126 05:37:21.956758 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:22 crc kubenswrapper[4871]: I1126 05:37:22.581039 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-797655d6d8-grllv" Nov 26 05:37:22 crc kubenswrapper[4871]: I1126 05:37:22.655769 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-console/console-f9d7485db-h5qx5"] Nov 26 05:37:23 crc kubenswrapper[4871]: I1126 05:37:23.614994 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:37:23 crc kubenswrapper[4871]: I1126 05:37:23.615428 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:37:31 crc kubenswrapper[4871]: I1126 05:37:31.625318 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-ngv76" Nov 26 05:37:47 crc kubenswrapper[4871]: I1126 05:37:47.709855 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-h5qx5" podUID="65ed678d-1457-46e2-a59d-1b05e7bbee8c" containerName="console" containerID="cri-o://f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d" gracePeriod=15 Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.139191 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-h5qx5_65ed678d-1457-46e2-a59d-1b05e7bbee8c/console/0.log" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.139549 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.231282 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk"] Nov 26 05:37:48 crc kubenswrapper[4871]: E1126 05:37:48.231838 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65ed678d-1457-46e2-a59d-1b05e7bbee8c" containerName="console" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.231928 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="65ed678d-1457-46e2-a59d-1b05e7bbee8c" containerName="console" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.232191 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="65ed678d-1457-46e2-a59d-1b05e7bbee8c" containerName="console" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.233250 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.235241 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.264944 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-trusted-ca-bundle\") pod \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.265027 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-oauth-config\") pod \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.265054 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-service-ca\") pod \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.265104 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmq95\" (UniqueName: \"kubernetes.io/projected/65ed678d-1457-46e2-a59d-1b05e7bbee8c-kube-api-access-dmq95\") pod \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.265144 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-serving-cert\") pod \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.265169 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-config\") pod \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.265207 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-oauth-serving-cert\") pod \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\" (UID: \"65ed678d-1457-46e2-a59d-1b05e7bbee8c\") " Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.266294 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "65ed678d-1457-46e2-a59d-1b05e7bbee8c" (UID: "65ed678d-1457-46e2-a59d-1b05e7bbee8c"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.266305 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "65ed678d-1457-46e2-a59d-1b05e7bbee8c" (UID: "65ed678d-1457-46e2-a59d-1b05e7bbee8c"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.267429 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-service-ca" (OuterVolumeSpecName: "service-ca") pod "65ed678d-1457-46e2-a59d-1b05e7bbee8c" (UID: "65ed678d-1457-46e2-a59d-1b05e7bbee8c"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.267856 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-config" (OuterVolumeSpecName: "console-config") pod "65ed678d-1457-46e2-a59d-1b05e7bbee8c" (UID: "65ed678d-1457-46e2-a59d-1b05e7bbee8c"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.271784 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "65ed678d-1457-46e2-a59d-1b05e7bbee8c" (UID: "65ed678d-1457-46e2-a59d-1b05e7bbee8c"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.272436 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "65ed678d-1457-46e2-a59d-1b05e7bbee8c" (UID: "65ed678d-1457-46e2-a59d-1b05e7bbee8c"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.272731 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65ed678d-1457-46e2-a59d-1b05e7bbee8c-kube-api-access-dmq95" (OuterVolumeSpecName: "kube-api-access-dmq95") pod "65ed678d-1457-46e2-a59d-1b05e7bbee8c" (UID: "65ed678d-1457-46e2-a59d-1b05e7bbee8c"). InnerVolumeSpecName "kube-api-access-dmq95". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.278965 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk"] Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.366347 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.366800 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.367014 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbnf5\" (UniqueName: \"kubernetes.io/projected/48fae954-7c94-4755-8e57-c910119b6089-kube-api-access-dbnf5\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.367180 4871 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.367204 4871 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.367217 4871 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.367230 4871 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.367242 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmq95\" (UniqueName: \"kubernetes.io/projected/65ed678d-1457-46e2-a59d-1b05e7bbee8c-kube-api-access-dmq95\") on node \"crc\" DevicePath \"\"" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.367255 4871 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.367267 4871 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/65ed678d-1457-46e2-a59d-1b05e7bbee8c-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.468595 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.468918 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.469131 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbnf5\" (UniqueName: \"kubernetes.io/projected/48fae954-7c94-4755-8e57-c910119b6089-kube-api-access-dbnf5\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.469209 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.469576 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.487952 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbnf5\" (UniqueName: \"kubernetes.io/projected/48fae954-7c94-4755-8e57-c910119b6089-kube-api-access-dbnf5\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.547482 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.752739 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk"] Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.773819 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" event={"ID":"48fae954-7c94-4755-8e57-c910119b6089","Type":"ContainerStarted","Data":"2bca03c2f312cae309f600a67aa55da6b46228fc2cff1e92e7087eaf28094311"} Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.777483 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-h5qx5_65ed678d-1457-46e2-a59d-1b05e7bbee8c/console/0.log" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.777642 4871 generic.go:334] "Generic (PLEG): container finished" podID="65ed678d-1457-46e2-a59d-1b05e7bbee8c" containerID="f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d" exitCode=2 Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.777727 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h5qx5" event={"ID":"65ed678d-1457-46e2-a59d-1b05e7bbee8c","Type":"ContainerDied","Data":"f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d"} Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.778033 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-h5qx5" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.778573 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h5qx5" event={"ID":"65ed678d-1457-46e2-a59d-1b05e7bbee8c","Type":"ContainerDied","Data":"476b0f3d190fb318427a04881c97d5d3021290588e6ea5689a0b7ce02e426b29"} Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.778661 4871 scope.go:117] "RemoveContainer" containerID="f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.796423 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-h5qx5"] Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.806165 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-h5qx5"] Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.810101 4871 scope.go:117] "RemoveContainer" containerID="f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d" Nov 26 05:37:48 crc kubenswrapper[4871]: E1126 05:37:48.811852 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d\": container with ID starting with f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d not found: ID does not exist" containerID="f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d" Nov 26 05:37:48 crc kubenswrapper[4871]: I1126 05:37:48.811942 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d"} err="failed to get container status \"f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d\": rpc error: code = NotFound desc = could not find container 
\"f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d\": container with ID starting with f0bd8a75b07e7762bd46d4350707a542d76fa93c75c8d1f1b9d1f5d31800a22d not found: ID does not exist" Nov 26 05:37:49 crc kubenswrapper[4871]: I1126 05:37:49.785156 4871 generic.go:334] "Generic (PLEG): container finished" podID="48fae954-7c94-4755-8e57-c910119b6089" containerID="41190e1647e6c394ec42914e358a791fc90847e5bc982b326fa3ade5d5b5fe8c" exitCode=0 Nov 26 05:37:49 crc kubenswrapper[4871]: I1126 05:37:49.785198 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" event={"ID":"48fae954-7c94-4755-8e57-c910119b6089","Type":"ContainerDied","Data":"41190e1647e6c394ec42914e358a791fc90847e5bc982b326fa3ade5d5b5fe8c"} Nov 26 05:37:50 crc kubenswrapper[4871]: I1126 05:37:50.520934 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65ed678d-1457-46e2-a59d-1b05e7bbee8c" path="/var/lib/kubelet/pods/65ed678d-1457-46e2-a59d-1b05e7bbee8c/volumes" Nov 26 05:37:51 crc kubenswrapper[4871]: I1126 05:37:51.803685 4871 generic.go:334] "Generic (PLEG): container finished" podID="48fae954-7c94-4755-8e57-c910119b6089" containerID="edbcf702eeece4150e3537fe57ad42027a955f716abe8c4cfc1cd860bfdfbe34" exitCode=0 Nov 26 05:37:51 crc kubenswrapper[4871]: I1126 05:37:51.803893 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" event={"ID":"48fae954-7c94-4755-8e57-c910119b6089","Type":"ContainerDied","Data":"edbcf702eeece4150e3537fe57ad42027a955f716abe8c4cfc1cd860bfdfbe34"} Nov 26 05:37:52 crc kubenswrapper[4871]: I1126 05:37:52.812327 4871 generic.go:334] "Generic (PLEG): container finished" podID="48fae954-7c94-4755-8e57-c910119b6089" containerID="9566b0240c79cc96bd1034f22cbbe9a03452116b1c9c2d4596053046b61383a3" exitCode=0 Nov 26 05:37:52 crc kubenswrapper[4871]: I1126 05:37:52.812469 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" event={"ID":"48fae954-7c94-4755-8e57-c910119b6089","Type":"ContainerDied","Data":"9566b0240c79cc96bd1034f22cbbe9a03452116b1c9c2d4596053046b61383a3"} Nov 26 05:37:53 crc kubenswrapper[4871]: I1126 05:37:53.615284 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:37:53 crc kubenswrapper[4871]: I1126 05:37:53.615371 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.122313 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.158666 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-bundle\") pod \"48fae954-7c94-4755-8e57-c910119b6089\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.158746 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-util\") pod \"48fae954-7c94-4755-8e57-c910119b6089\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.158791 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbnf5\" (UniqueName: \"kubernetes.io/projected/48fae954-7c94-4755-8e57-c910119b6089-kube-api-access-dbnf5\") pod \"48fae954-7c94-4755-8e57-c910119b6089\" (UID: \"48fae954-7c94-4755-8e57-c910119b6089\") " Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.160369 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-bundle" (OuterVolumeSpecName: "bundle") pod "48fae954-7c94-4755-8e57-c910119b6089" (UID: "48fae954-7c94-4755-8e57-c910119b6089"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.167491 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48fae954-7c94-4755-8e57-c910119b6089-kube-api-access-dbnf5" (OuterVolumeSpecName: "kube-api-access-dbnf5") pod "48fae954-7c94-4755-8e57-c910119b6089" (UID: "48fae954-7c94-4755-8e57-c910119b6089"). InnerVolumeSpecName "kube-api-access-dbnf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.260235 4871 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.260573 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbnf5\" (UniqueName: \"kubernetes.io/projected/48fae954-7c94-4755-8e57-c910119b6089-kube-api-access-dbnf5\") on node \"crc\" DevicePath \"\"" Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.484391 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-util" (OuterVolumeSpecName: "util") pod "48fae954-7c94-4755-8e57-c910119b6089" (UID: "48fae954-7c94-4755-8e57-c910119b6089"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.563784 4871 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/48fae954-7c94-4755-8e57-c910119b6089-util\") on node \"crc\" DevicePath \"\"" Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.828350 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" event={"ID":"48fae954-7c94-4755-8e57-c910119b6089","Type":"ContainerDied","Data":"2bca03c2f312cae309f600a67aa55da6b46228fc2cff1e92e7087eaf28094311"} Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.828403 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2bca03c2f312cae309f600a67aa55da6b46228fc2cff1e92e7087eaf28094311" Nov 26 05:37:54 crc kubenswrapper[4871]: I1126 05:37:54.828457 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.239835 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g"] Nov 26 05:38:07 crc kubenswrapper[4871]: E1126 05:38:07.240399 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48fae954-7c94-4755-8e57-c910119b6089" containerName="pull" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.240410 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="48fae954-7c94-4755-8e57-c910119b6089" containerName="pull" Nov 26 05:38:07 crc kubenswrapper[4871]: E1126 05:38:07.240422 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48fae954-7c94-4755-8e57-c910119b6089" containerName="util" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.240428 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="48fae954-7c94-4755-8e57-c910119b6089" containerName="util" Nov 26 05:38:07 crc kubenswrapper[4871]: E1126 05:38:07.240441 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48fae954-7c94-4755-8e57-c910119b6089" containerName="extract" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.240452 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="48fae954-7c94-4755-8e57-c910119b6089" containerName="extract" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.240632 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="48fae954-7c94-4755-8e57-c910119b6089" containerName="extract" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.241024 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.241760 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0f2d5628-2ad3-400c-bc77-b0251683a83a-webhook-cert\") pod \"metallb-operator-controller-manager-645b9949f7-48k8g\" (UID: \"0f2d5628-2ad3-400c-bc77-b0251683a83a\") " pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.241867 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnvh6\" (UniqueName: \"kubernetes.io/projected/0f2d5628-2ad3-400c-bc77-b0251683a83a-kube-api-access-wnvh6\") pod \"metallb-operator-controller-manager-645b9949f7-48k8g\" (UID: \"0f2d5628-2ad3-400c-bc77-b0251683a83a\") " pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.241940 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0f2d5628-2ad3-400c-bc77-b0251683a83a-apiservice-cert\") pod \"metallb-operator-controller-manager-645b9949f7-48k8g\" (UID: \"0f2d5628-2ad3-400c-bc77-b0251683a83a\") " pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.243028 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.243208 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-79qlw" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.244119 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.244129 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.246487 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.256459 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g"] Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.343048 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0f2d5628-2ad3-400c-bc77-b0251683a83a-webhook-cert\") pod \"metallb-operator-controller-manager-645b9949f7-48k8g\" (UID: \"0f2d5628-2ad3-400c-bc77-b0251683a83a\") " pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.343117 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnvh6\" (UniqueName: \"kubernetes.io/projected/0f2d5628-2ad3-400c-bc77-b0251683a83a-kube-api-access-wnvh6\") pod \"metallb-operator-controller-manager-645b9949f7-48k8g\" (UID: \"0f2d5628-2ad3-400c-bc77-b0251683a83a\") " pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.343156 4871 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0f2d5628-2ad3-400c-bc77-b0251683a83a-apiservice-cert\") pod \"metallb-operator-controller-manager-645b9949f7-48k8g\" (UID: \"0f2d5628-2ad3-400c-bc77-b0251683a83a\") " pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.348802 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0f2d5628-2ad3-400c-bc77-b0251683a83a-webhook-cert\") pod \"metallb-operator-controller-manager-645b9949f7-48k8g\" (UID: \"0f2d5628-2ad3-400c-bc77-b0251683a83a\") " pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.358834 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0f2d5628-2ad3-400c-bc77-b0251683a83a-apiservice-cert\") pod \"metallb-operator-controller-manager-645b9949f7-48k8g\" (UID: \"0f2d5628-2ad3-400c-bc77-b0251683a83a\") " pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.365348 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnvh6\" (UniqueName: \"kubernetes.io/projected/0f2d5628-2ad3-400c-bc77-b0251683a83a-kube-api-access-wnvh6\") pod \"metallb-operator-controller-manager-645b9949f7-48k8g\" (UID: \"0f2d5628-2ad3-400c-bc77-b0251683a83a\") " pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.554448 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.685682 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65"] Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.686887 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.689932 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-p7dxk" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.690131 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.690261 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.698575 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65"] Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.811274 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g"] Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.853296 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27zwv\" (UniqueName: \"kubernetes.io/projected/35333648-4e74-4c66-803e-091d7d5673ca-kube-api-access-27zwv\") pod \"metallb-operator-webhook-server-69c6746fd5-pkb65\" (UID: \"35333648-4e74-4c66-803e-091d7d5673ca\") " pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.853891 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/35333648-4e74-4c66-803e-091d7d5673ca-webhook-cert\") pod \"metallb-operator-webhook-server-69c6746fd5-pkb65\" (UID: \"35333648-4e74-4c66-803e-091d7d5673ca\") " pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.853949 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/35333648-4e74-4c66-803e-091d7d5673ca-apiservice-cert\") pod \"metallb-operator-webhook-server-69c6746fd5-pkb65\" (UID: \"35333648-4e74-4c66-803e-091d7d5673ca\") " pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.911394 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" event={"ID":"0f2d5628-2ad3-400c-bc77-b0251683a83a","Type":"ContainerStarted","Data":"520da3356a6ffffa4b46589d0a8783d916c1d5b4d0d1d66cf107673e96fa2b98"} Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.955432 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27zwv\" (UniqueName: \"kubernetes.io/projected/35333648-4e74-4c66-803e-091d7d5673ca-kube-api-access-27zwv\") pod \"metallb-operator-webhook-server-69c6746fd5-pkb65\" (UID: \"35333648-4e74-4c66-803e-091d7d5673ca\") " pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.955543 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/35333648-4e74-4c66-803e-091d7d5673ca-webhook-cert\") pod \"metallb-operator-webhook-server-69c6746fd5-pkb65\" (UID: \"35333648-4e74-4c66-803e-091d7d5673ca\") " 
pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.955591 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/35333648-4e74-4c66-803e-091d7d5673ca-apiservice-cert\") pod \"metallb-operator-webhook-server-69c6746fd5-pkb65\" (UID: \"35333648-4e74-4c66-803e-091d7d5673ca\") " pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.959400 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/35333648-4e74-4c66-803e-091d7d5673ca-apiservice-cert\") pod \"metallb-operator-webhook-server-69c6746fd5-pkb65\" (UID: \"35333648-4e74-4c66-803e-091d7d5673ca\") " pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.963733 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/35333648-4e74-4c66-803e-091d7d5673ca-webhook-cert\") pod \"metallb-operator-webhook-server-69c6746fd5-pkb65\" (UID: \"35333648-4e74-4c66-803e-091d7d5673ca\") " pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:07 crc kubenswrapper[4871]: I1126 05:38:07.975026 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27zwv\" (UniqueName: \"kubernetes.io/projected/35333648-4e74-4c66-803e-091d7d5673ca-kube-api-access-27zwv\") pod \"metallb-operator-webhook-server-69c6746fd5-pkb65\" (UID: \"35333648-4e74-4c66-803e-091d7d5673ca\") " pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.043779 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.101748 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24p5x"] Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.101946 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" podUID="d2225428-c79d-4406-9238-432797b4fa99" containerName="controller-manager" containerID="cri-o://ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd" gracePeriod=30 Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.179689 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6"] Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.180151 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" podUID="10c1cc5f-f24d-434a-a9dd-0e0d8d22c153" containerName="route-controller-manager" containerID="cri-o://aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7" gracePeriod=30 Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.331840 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65"] Nov 26 05:38:08 crc kubenswrapper[4871]: W1126 05:38:08.368817 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod35333648_4e74_4c66_803e_091d7d5673ca.slice/crio-25e3ddf58fe49b2935bbc64f66f2203ebae069cce2f3025ca37ab09a20bd06e1 WatchSource:0}: Error finding container 25e3ddf58fe49b2935bbc64f66f2203ebae069cce2f3025ca37ab09a20bd06e1: Status 404 returned error can't find the container with id 25e3ddf58fe49b2935bbc64f66f2203ebae069cce2f3025ca37ab09a20bd06e1 Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.711815 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.764173 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.868647 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2225428-c79d-4406-9238-432797b4fa99-serving-cert\") pod \"d2225428-c79d-4406-9238-432797b4fa99\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.868734 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-client-ca\") pod \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.868770 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsls8\" (UniqueName: \"kubernetes.io/projected/d2225428-c79d-4406-9238-432797b4fa99-kube-api-access-tsls8\") pod \"d2225428-c79d-4406-9238-432797b4fa99\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.868796 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-serving-cert\") pod \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.868824 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-proxy-ca-bundles\") pod \"d2225428-c79d-4406-9238-432797b4fa99\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.868862 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbtkx\" (UniqueName: \"kubernetes.io/projected/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-kube-api-access-hbtkx\") pod \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.868915 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-config\") pod \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\" (UID: \"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153\") " Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.868941 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-client-ca\") pod \"d2225428-c79d-4406-9238-432797b4fa99\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.868991 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-config\") pod \"d2225428-c79d-4406-9238-432797b4fa99\" (UID: \"d2225428-c79d-4406-9238-432797b4fa99\") " Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.869537 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-client-ca" (OuterVolumeSpecName: "client-ca") pod "d2225428-c79d-4406-9238-432797b4fa99" 
(UID: "d2225428-c79d-4406-9238-432797b4fa99"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.869576 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "d2225428-c79d-4406-9238-432797b4fa99" (UID: "d2225428-c79d-4406-9238-432797b4fa99"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.869633 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-config" (OuterVolumeSpecName: "config") pod "d2225428-c79d-4406-9238-432797b4fa99" (UID: "d2225428-c79d-4406-9238-432797b4fa99"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.869640 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-config" (OuterVolumeSpecName: "config") pod "10c1cc5f-f24d-434a-a9dd-0e0d8d22c153" (UID: "10c1cc5f-f24d-434a-a9dd-0e0d8d22c153"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.870056 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-client-ca" (OuterVolumeSpecName: "client-ca") pod "10c1cc5f-f24d-434a-a9dd-0e0d8d22c153" (UID: "10c1cc5f-f24d-434a-a9dd-0e0d8d22c153"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.876317 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2225428-c79d-4406-9238-432797b4fa99-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d2225428-c79d-4406-9238-432797b4fa99" (UID: "d2225428-c79d-4406-9238-432797b4fa99"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.876636 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "10c1cc5f-f24d-434a-a9dd-0e0d8d22c153" (UID: "10c1cc5f-f24d-434a-a9dd-0e0d8d22c153"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.877050 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2225428-c79d-4406-9238-432797b4fa99-kube-api-access-tsls8" (OuterVolumeSpecName: "kube-api-access-tsls8") pod "d2225428-c79d-4406-9238-432797b4fa99" (UID: "d2225428-c79d-4406-9238-432797b4fa99"). InnerVolumeSpecName "kube-api-access-tsls8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.878822 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-kube-api-access-hbtkx" (OuterVolumeSpecName: "kube-api-access-hbtkx") pod "10c1cc5f-f24d-434a-a9dd-0e0d8d22c153" (UID: "10c1cc5f-f24d-434a-a9dd-0e0d8d22c153"). 
InnerVolumeSpecName "kube-api-access-hbtkx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.918622 4871 generic.go:334] "Generic (PLEG): container finished" podID="d2225428-c79d-4406-9238-432797b4fa99" containerID="ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd" exitCode=0 Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.918674 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.918695 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" event={"ID":"d2225428-c79d-4406-9238-432797b4fa99","Type":"ContainerDied","Data":"ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd"} Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.918725 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-24p5x" event={"ID":"d2225428-c79d-4406-9238-432797b4fa99","Type":"ContainerDied","Data":"3466e5288e80802867c08c8efcbcd3ab215d8fc4bed70d78f4226339d06fccc8"} Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.918742 4871 scope.go:117] "RemoveContainer" containerID="ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.919959 4871 generic.go:334] "Generic (PLEG): container finished" podID="10c1cc5f-f24d-434a-a9dd-0e0d8d22c153" containerID="aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7" exitCode=0 Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.920003 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" event={"ID":"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153","Type":"ContainerDied","Data":"aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7"} Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.920019 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" event={"ID":"10c1cc5f-f24d-434a-a9dd-0e0d8d22c153","Type":"ContainerDied","Data":"f5b62bfabe3d1b438ef66fb79aa859b37e7683e3450be6f7c2f76cd69d086bcb"} Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.920018 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.921227 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" event={"ID":"35333648-4e74-4c66-803e-091d7d5673ca","Type":"ContainerStarted","Data":"25e3ddf58fe49b2935bbc64f66f2203ebae069cce2f3025ca37ab09a20bd06e1"} Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.933173 4871 scope.go:117] "RemoveContainer" containerID="ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd" Nov 26 05:38:08 crc kubenswrapper[4871]: E1126 05:38:08.933572 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd\": container with ID starting with ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd not found: ID does not exist" containerID="ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.933608 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd"} err="failed to get container status \"ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd\": rpc error: code = NotFound desc = could not find container \"ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd\": container with ID starting with ccb3768cec333d72897dc5ba74bca2476110333f7ec4d786b27549de3f5038bd not found: ID does not exist" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.933632 4871 scope.go:117] "RemoveContainer" containerID="aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.950013 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24p5x"] Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.952089 4871 scope.go:117] "RemoveContainer" containerID="aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.954712 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24p5x"] Nov 26 05:38:08 crc kubenswrapper[4871]: E1126 05:38:08.965164 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7\": container with ID starting with aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7 not found: ID does not exist" containerID="aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.965255 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7"} err="failed to get container status \"aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7\": rpc error: code = NotFound desc = could not find container \"aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7\": container with ID starting with aa7e8778c633ed1ed1d867e08a5346745733e8ab7f33988ddcf27636190a2cb7 not found: ID does not exist" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.966461 4871 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6"] Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.969383 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-68sd6"] Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.969773 4871 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.969808 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsls8\" (UniqueName: \"kubernetes.io/projected/d2225428-c79d-4406-9238-432797b4fa99-kube-api-access-tsls8\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.969820 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.969832 4871 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.969846 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbtkx\" (UniqueName: \"kubernetes.io/projected/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-kube-api-access-hbtkx\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.969858 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.969869 4871 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.969879 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2225428-c79d-4406-9238-432797b4fa99-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:08 crc kubenswrapper[4871]: I1126 05:38:08.969888 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d2225428-c79d-4406-9238-432797b4fa99-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.715414 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4"] Nov 26 05:38:09 crc kubenswrapper[4871]: E1126 05:38:09.716041 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10c1cc5f-f24d-434a-a9dd-0e0d8d22c153" containerName="route-controller-manager" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.716056 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="10c1cc5f-f24d-434a-a9dd-0e0d8d22c153" containerName="route-controller-manager" Nov 26 05:38:09 crc kubenswrapper[4871]: E1126 05:38:09.716078 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2225428-c79d-4406-9238-432797b4fa99" containerName="controller-manager" Nov 26 05:38:09 
crc kubenswrapper[4871]: I1126 05:38:09.716086 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2225428-c79d-4406-9238-432797b4fa99" containerName="controller-manager" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.716204 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="10c1cc5f-f24d-434a-a9dd-0e0d8d22c153" containerName="route-controller-manager" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.716222 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2225428-c79d-4406-9238-432797b4fa99" containerName="controller-manager" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.717675 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-77f6fcf467-vc8kt"] Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.717823 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.718917 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.721430 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.721691 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.721862 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.722100 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.722249 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.722427 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.724018 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.724173 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4"] Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.724181 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.724246 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.724235 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.727105 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-77f6fcf467-vc8kt"] Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.727773 4871 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.727955 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.730396 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.786449 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-client-ca\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.786503 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-config\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.786560 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c71a179d-41d3-4850-81b7-433490a981d6-serving-cert\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.786596 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0e012089-abb2-41c1-af34-38ac707e6bf0-serving-cert\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.786630 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6lg4\" (UniqueName: \"kubernetes.io/projected/0e012089-abb2-41c1-af34-38ac707e6bf0-kube-api-access-w6lg4\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.786749 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27ktk\" (UniqueName: \"kubernetes.io/projected/c71a179d-41d3-4850-81b7-433490a981d6-kube-api-access-27ktk\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.786776 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-client-ca\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") 
" pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.786847 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-config\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.786867 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-proxy-ca-bundles\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.799268 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-77f6fcf467-vc8kt"] Nov 26 05:38:09 crc kubenswrapper[4871]: E1126 05:38:09.799822 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[client-ca config kube-api-access-27ktk proxy-ca-bundles serving-cert], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" podUID="c71a179d-41d3-4850-81b7-433490a981d6" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.806147 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4"] Nov 26 05:38:09 crc kubenswrapper[4871]: E1126 05:38:09.806689 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[client-ca config kube-api-access-w6lg4 serving-cert], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" podUID="0e012089-abb2-41c1-af34-38ac707e6bf0" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.887687 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-client-ca\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.887742 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-config\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.887773 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c71a179d-41d3-4850-81b7-433490a981d6-serving-cert\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.887800 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/0e012089-abb2-41c1-af34-38ac707e6bf0-serving-cert\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.887828 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6lg4\" (UniqueName: \"kubernetes.io/projected/0e012089-abb2-41c1-af34-38ac707e6bf0-kube-api-access-w6lg4\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.887893 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27ktk\" (UniqueName: \"kubernetes.io/projected/c71a179d-41d3-4850-81b7-433490a981d6-kube-api-access-27ktk\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.887919 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-client-ca\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.887961 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-config\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.887983 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-proxy-ca-bundles\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.888494 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-client-ca\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.889979 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-client-ca\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.890369 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-config\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: 
\"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.891135 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-proxy-ca-bundles\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.892233 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-config\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.894041 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0e012089-abb2-41c1-af34-38ac707e6bf0-serving-cert\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.907156 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6lg4\" (UniqueName: \"kubernetes.io/projected/0e012089-abb2-41c1-af34-38ac707e6bf0-kube-api-access-w6lg4\") pod \"route-controller-manager-84f4986b5c-76hr4\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.911348 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c71a179d-41d3-4850-81b7-433490a981d6-serving-cert\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.913482 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27ktk\" (UniqueName: \"kubernetes.io/projected/c71a179d-41d3-4850-81b7-433490a981d6-kube-api-access-27ktk\") pod \"controller-manager-77f6fcf467-vc8kt\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.931663 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.931661 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.949709 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:09 crc kubenswrapper[4871]: I1126 05:38:09.956189 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.090688 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-config\") pod \"c71a179d-41d3-4850-81b7-433490a981d6\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.090732 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c71a179d-41d3-4850-81b7-433490a981d6-serving-cert\") pod \"c71a179d-41d3-4850-81b7-433490a981d6\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.090754 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-client-ca\") pod \"0e012089-abb2-41c1-af34-38ac707e6bf0\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.090777 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27ktk\" (UniqueName: \"kubernetes.io/projected/c71a179d-41d3-4850-81b7-433490a981d6-kube-api-access-27ktk\") pod \"c71a179d-41d3-4850-81b7-433490a981d6\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.090827 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-client-ca\") pod \"c71a179d-41d3-4850-81b7-433490a981d6\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.090849 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0e012089-abb2-41c1-af34-38ac707e6bf0-serving-cert\") pod \"0e012089-abb2-41c1-af34-38ac707e6bf0\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.090888 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-proxy-ca-bundles\") pod \"c71a179d-41d3-4850-81b7-433490a981d6\" (UID: \"c71a179d-41d3-4850-81b7-433490a981d6\") " Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.090914 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-config\") pod \"0e012089-abb2-41c1-af34-38ac707e6bf0\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.090942 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6lg4\" (UniqueName: \"kubernetes.io/projected/0e012089-abb2-41c1-af34-38ac707e6bf0-kube-api-access-w6lg4\") pod \"0e012089-abb2-41c1-af34-38ac707e6bf0\" (UID: \"0e012089-abb2-41c1-af34-38ac707e6bf0\") " Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.092298 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-config" (OuterVolumeSpecName: "config") pod "c71a179d-41d3-4850-81b7-433490a981d6" (UID: 
"c71a179d-41d3-4850-81b7-433490a981d6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.093300 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-client-ca" (OuterVolumeSpecName: "client-ca") pod "0e012089-abb2-41c1-af34-38ac707e6bf0" (UID: "0e012089-abb2-41c1-af34-38ac707e6bf0"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.093758 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c71a179d-41d3-4850-81b7-433490a981d6" (UID: "c71a179d-41d3-4850-81b7-433490a981d6"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.094097 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-config" (OuterVolumeSpecName: "config") pod "0e012089-abb2-41c1-af34-38ac707e6bf0" (UID: "0e012089-abb2-41c1-af34-38ac707e6bf0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.094620 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-client-ca" (OuterVolumeSpecName: "client-ca") pod "c71a179d-41d3-4850-81b7-433490a981d6" (UID: "c71a179d-41d3-4850-81b7-433490a981d6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.094854 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c71a179d-41d3-4850-81b7-433490a981d6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c71a179d-41d3-4850-81b7-433490a981d6" (UID: "c71a179d-41d3-4850-81b7-433490a981d6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.095327 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e012089-abb2-41c1-af34-38ac707e6bf0-kube-api-access-w6lg4" (OuterVolumeSpecName: "kube-api-access-w6lg4") pod "0e012089-abb2-41c1-af34-38ac707e6bf0" (UID: "0e012089-abb2-41c1-af34-38ac707e6bf0"). InnerVolumeSpecName "kube-api-access-w6lg4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.096342 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c71a179d-41d3-4850-81b7-433490a981d6-kube-api-access-27ktk" (OuterVolumeSpecName: "kube-api-access-27ktk") pod "c71a179d-41d3-4850-81b7-433490a981d6" (UID: "c71a179d-41d3-4850-81b7-433490a981d6"). InnerVolumeSpecName "kube-api-access-27ktk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.098639 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e012089-abb2-41c1-af34-38ac707e6bf0-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0e012089-abb2-41c1-af34-38ac707e6bf0" (UID: "0e012089-abb2-41c1-af34-38ac707e6bf0"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.202275 4871 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.202314 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.202324 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6lg4\" (UniqueName: \"kubernetes.io/projected/0e012089-abb2-41c1-af34-38ac707e6bf0-kube-api-access-w6lg4\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.202335 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.202344 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c71a179d-41d3-4850-81b7-433490a981d6-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.202352 4871 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/0e012089-abb2-41c1-af34-38ac707e6bf0-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.202360 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27ktk\" (UniqueName: \"kubernetes.io/projected/c71a179d-41d3-4850-81b7-433490a981d6-kube-api-access-27ktk\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.202368 4871 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c71a179d-41d3-4850-81b7-433490a981d6-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.202376 4871 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0e012089-abb2-41c1-af34-38ac707e6bf0-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.515098 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10c1cc5f-f24d-434a-a9dd-0e0d8d22c153" path="/var/lib/kubelet/pods/10c1cc5f-f24d-434a-a9dd-0e0d8d22c153/volumes" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.515925 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2225428-c79d-4406-9238-432797b4fa99" path="/var/lib/kubelet/pods/d2225428-c79d-4406-9238-432797b4fa99/volumes" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.937380 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-77f6fcf467-vc8kt" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.937394 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.984506 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt"] Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.985481 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.990470 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.992389 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.992627 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-77f6fcf467-vc8kt"] Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.992678 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.992710 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.992718 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.992775 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 05:38:10 crc kubenswrapper[4871]: I1126 05:38:10.999280 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-77f6fcf467-vc8kt"] Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.000359 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.001301 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt"] Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.011618 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dgft\" (UniqueName: \"kubernetes.io/projected/e61839f4-36e1-4d25-b438-25c44b7237ca-kube-api-access-8dgft\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.011689 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e61839f4-36e1-4d25-b438-25c44b7237ca-client-ca\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.011722 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e61839f4-36e1-4d25-b438-25c44b7237ca-proxy-ca-bundles\") pod 
\"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.011754 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e61839f4-36e1-4d25-b438-25c44b7237ca-config\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.011851 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e61839f4-36e1-4d25-b438-25c44b7237ca-serving-cert\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.017988 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4"] Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.022639 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84f4986b5c-76hr4"] Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.112962 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dgft\" (UniqueName: \"kubernetes.io/projected/e61839f4-36e1-4d25-b438-25c44b7237ca-kube-api-access-8dgft\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.113045 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e61839f4-36e1-4d25-b438-25c44b7237ca-client-ca\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.113075 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e61839f4-36e1-4d25-b438-25c44b7237ca-proxy-ca-bundles\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.113108 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e61839f4-36e1-4d25-b438-25c44b7237ca-config\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.113158 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e61839f4-36e1-4d25-b438-25c44b7237ca-serving-cert\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 
05:38:11.114404 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e61839f4-36e1-4d25-b438-25c44b7237ca-proxy-ca-bundles\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.115233 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e61839f4-36e1-4d25-b438-25c44b7237ca-client-ca\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.115782 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e61839f4-36e1-4d25-b438-25c44b7237ca-config\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.118466 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e61839f4-36e1-4d25-b438-25c44b7237ca-serving-cert\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.127866 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dgft\" (UniqueName: \"kubernetes.io/projected/e61839f4-36e1-4d25-b438-25c44b7237ca-kube-api-access-8dgft\") pod \"controller-manager-c84fd6f5c-ttvjt\" (UID: \"e61839f4-36e1-4d25-b438-25c44b7237ca\") " pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.330578 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.985206 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" event={"ID":"0f2d5628-2ad3-400c-bc77-b0251683a83a","Type":"ContainerStarted","Data":"cdc9c87da2071f51195db8368cea3faabab605548e3e0bd0a674606a9811ca20"} Nov 26 05:38:11 crc kubenswrapper[4871]: I1126 05:38:11.985543 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:38:12 crc kubenswrapper[4871]: I1126 05:38:12.004036 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" podStartSLOduration=1.923298796 podStartE2EDuration="5.003999027s" podCreationTimestamp="2025-11-26 05:38:07 +0000 UTC" firstStartedPulling="2025-11-26 05:38:07.826725544 +0000 UTC m=+746.009777120" lastFinishedPulling="2025-11-26 05:38:10.907425765 +0000 UTC m=+749.090477351" observedRunningTime="2025-11-26 05:38:12.003049583 +0000 UTC m=+750.186101169" watchObservedRunningTime="2025-11-26 05:38:12.003999027 +0000 UTC m=+750.187050613" Nov 26 05:38:12 crc kubenswrapper[4871]: I1126 05:38:12.513962 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e012089-abb2-41c1-af34-38ac707e6bf0" path="/var/lib/kubelet/pods/0e012089-abb2-41c1-af34-38ac707e6bf0/volumes" Nov 26 05:38:12 crc kubenswrapper[4871]: I1126 05:38:12.514321 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c71a179d-41d3-4850-81b7-433490a981d6" path="/var/lib/kubelet/pods/c71a179d-41d3-4850-81b7-433490a981d6/volumes" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.084263 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt"] Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.724296 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r"] Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.725224 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.728918 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.728960 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.729068 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.729159 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.729209 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.729671 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.743623 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r"] Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.845236 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1670801e-9f36-41fa-804d-064ac2d07885-serving-cert\") pod \"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.845349 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gs47\" (UniqueName: \"kubernetes.io/projected/1670801e-9f36-41fa-804d-064ac2d07885-kube-api-access-5gs47\") pod \"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.845382 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1670801e-9f36-41fa-804d-064ac2d07885-client-ca\") pod \"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.845411 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1670801e-9f36-41fa-804d-064ac2d07885-config\") pod \"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.946626 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gs47\" (UniqueName: \"kubernetes.io/projected/1670801e-9f36-41fa-804d-064ac2d07885-kube-api-access-5gs47\") pod 
\"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.946677 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1670801e-9f36-41fa-804d-064ac2d07885-client-ca\") pod \"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.946706 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1670801e-9f36-41fa-804d-064ac2d07885-config\") pod \"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.946745 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1670801e-9f36-41fa-804d-064ac2d07885-serving-cert\") pod \"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.948772 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1670801e-9f36-41fa-804d-064ac2d07885-client-ca\") pod \"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.949071 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1670801e-9f36-41fa-804d-064ac2d07885-config\") pod \"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.967793 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1670801e-9f36-41fa-804d-064ac2d07885-serving-cert\") pod \"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.968442 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gs47\" (UniqueName: \"kubernetes.io/projected/1670801e-9f36-41fa-804d-064ac2d07885-kube-api-access-5gs47\") pod \"route-controller-manager-65ffc995d8-wgc6r\" (UID: \"1670801e-9f36-41fa-804d-064ac2d07885\") " pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.997448 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" event={"ID":"35333648-4e74-4c66-803e-091d7d5673ca","Type":"ContainerStarted","Data":"b33990225f2753d38fb83a1233be7e4584e2cc0233a667958988c1581331793a"} Nov 26 05:38:13 crc 
kubenswrapper[4871]: I1126 05:38:13.997554 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.998600 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" event={"ID":"e61839f4-36e1-4d25-b438-25c44b7237ca","Type":"ContainerStarted","Data":"2bee6bff3b1990359588574dd887232013dea483beaeafcc36fb02c608150ff8"} Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.998624 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" event={"ID":"e61839f4-36e1-4d25-b438-25c44b7237ca","Type":"ContainerStarted","Data":"04b794660a844a16ac4a86fa8cb9832a9ca07bd5d081ff8f92099cb84185c9cc"} Nov 26 05:38:13 crc kubenswrapper[4871]: I1126 05:38:13.998853 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:14 crc kubenswrapper[4871]: I1126 05:38:14.002626 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" Nov 26 05:38:14 crc kubenswrapper[4871]: I1126 05:38:14.019814 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" podStartSLOduration=2.43802009 podStartE2EDuration="7.019797053s" podCreationTimestamp="2025-11-26 05:38:07 +0000 UTC" firstStartedPulling="2025-11-26 05:38:08.377092833 +0000 UTC m=+746.560144419" lastFinishedPulling="2025-11-26 05:38:12.958869796 +0000 UTC m=+751.141921382" observedRunningTime="2025-11-26 05:38:14.017265499 +0000 UTC m=+752.200317085" watchObservedRunningTime="2025-11-26 05:38:14.019797053 +0000 UTC m=+752.202848639" Nov 26 05:38:14 crc kubenswrapper[4871]: I1126 05:38:14.034852 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-c84fd6f5c-ttvjt" podStartSLOduration=5.034835241 podStartE2EDuration="5.034835241s" podCreationTimestamp="2025-11-26 05:38:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:38:14.034429851 +0000 UTC m=+752.217481437" watchObservedRunningTime="2025-11-26 05:38:14.034835241 +0000 UTC m=+752.217886827" Nov 26 05:38:14 crc kubenswrapper[4871]: I1126 05:38:14.039037 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:14 crc kubenswrapper[4871]: I1126 05:38:14.471848 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r"] Nov 26 05:38:14 crc kubenswrapper[4871]: W1126 05:38:14.483180 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1670801e_9f36_41fa_804d_064ac2d07885.slice/crio-61fd072d02706ab57c55e53ddeafe4ad15af72323e82fe459c00bad205b637a2 WatchSource:0}: Error finding container 61fd072d02706ab57c55e53ddeafe4ad15af72323e82fe459c00bad205b637a2: Status 404 returned error can't find the container with id 61fd072d02706ab57c55e53ddeafe4ad15af72323e82fe459c00bad205b637a2 Nov 26 05:38:15 crc kubenswrapper[4871]: I1126 05:38:15.006299 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" event={"ID":"1670801e-9f36-41fa-804d-064ac2d07885","Type":"ContainerStarted","Data":"8d15a451d528df7eff4283095d0537d979949f2571febaec696c959645bf809c"} Nov 26 05:38:15 crc kubenswrapper[4871]: I1126 05:38:15.006597 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" event={"ID":"1670801e-9f36-41fa-804d-064ac2d07885","Type":"ContainerStarted","Data":"61fd072d02706ab57c55e53ddeafe4ad15af72323e82fe459c00bad205b637a2"} Nov 26 05:38:15 crc kubenswrapper[4871]: I1126 05:38:15.007475 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:15 crc kubenswrapper[4871]: I1126 05:38:15.309403 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" Nov 26 05:38:15 crc kubenswrapper[4871]: I1126 05:38:15.331359 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-65ffc995d8-wgc6r" podStartSLOduration=6.33133874 podStartE2EDuration="6.33133874s" podCreationTimestamp="2025-11-26 05:38:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:38:15.037175864 +0000 UTC m=+753.220227450" watchObservedRunningTime="2025-11-26 05:38:15.33133874 +0000 UTC m=+753.514390336" Nov 26 05:38:17 crc kubenswrapper[4871]: I1126 05:38:17.514657 4871 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 26 05:38:23 crc kubenswrapper[4871]: I1126 05:38:23.614926 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:38:23 crc kubenswrapper[4871]: I1126 05:38:23.615487 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:38:23 crc 
kubenswrapper[4871]: I1126 05:38:23.615565 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:38:23 crc kubenswrapper[4871]: I1126 05:38:23.616347 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f9b3c6b7dc711fbab7cfc1df233a4b33f288cd38725d31ae281cb8abef183fd7"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 05:38:23 crc kubenswrapper[4871]: I1126 05:38:23.616411 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://f9b3c6b7dc711fbab7cfc1df233a4b33f288cd38725d31ae281cb8abef183fd7" gracePeriod=600 Nov 26 05:38:24 crc kubenswrapper[4871]: I1126 05:38:24.065545 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="f9b3c6b7dc711fbab7cfc1df233a4b33f288cd38725d31ae281cb8abef183fd7" exitCode=0 Nov 26 05:38:24 crc kubenswrapper[4871]: I1126 05:38:24.065597 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"f9b3c6b7dc711fbab7cfc1df233a4b33f288cd38725d31ae281cb8abef183fd7"} Nov 26 05:38:24 crc kubenswrapper[4871]: I1126 05:38:24.066392 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"352b2b280740af55cbe8f36dbe220adf905af3370f34cf811c417077b6fe54f3"} Nov 26 05:38:24 crc kubenswrapper[4871]: I1126 05:38:24.066484 4871 scope.go:117] "RemoveContainer" containerID="73505ee26772aa1df09c89bae702b19bd7861dae0e72aa5f1011d13c2064a8d5" Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.382095 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tnfdj"] Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.385237 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.395708 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tnfdj"] Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.504261 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56f7l\" (UniqueName: \"kubernetes.io/projected/b459dc97-78c7-446f-bc31-6497ad7169f4-kube-api-access-56f7l\") pod \"community-operators-tnfdj\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.504556 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-utilities\") pod \"community-operators-tnfdj\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.504679 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-catalog-content\") pod \"community-operators-tnfdj\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.605458 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56f7l\" (UniqueName: \"kubernetes.io/projected/b459dc97-78c7-446f-bc31-6497ad7169f4-kube-api-access-56f7l\") pod \"community-operators-tnfdj\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.605550 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-utilities\") pod \"community-operators-tnfdj\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.605580 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-catalog-content\") pod \"community-operators-tnfdj\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.606228 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-catalog-content\") pod \"community-operators-tnfdj\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.606328 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-utilities\") pod \"community-operators-tnfdj\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.628435 4871 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-56f7l\" (UniqueName: \"kubernetes.io/projected/b459dc97-78c7-446f-bc31-6497ad7169f4-kube-api-access-56f7l\") pod \"community-operators-tnfdj\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:25 crc kubenswrapper[4871]: I1126 05:38:25.746706 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:26 crc kubenswrapper[4871]: I1126 05:38:26.195437 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tnfdj"] Nov 26 05:38:26 crc kubenswrapper[4871]: W1126 05:38:26.217560 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb459dc97_78c7_446f_bc31_6497ad7169f4.slice/crio-2c07e4c20393af92b8e5c5049eeaca50c23a1132773a4b0892a3cd8f5f3af88a WatchSource:0}: Error finding container 2c07e4c20393af92b8e5c5049eeaca50c23a1132773a4b0892a3cd8f5f3af88a: Status 404 returned error can't find the container with id 2c07e4c20393af92b8e5c5049eeaca50c23a1132773a4b0892a3cd8f5f3af88a Nov 26 05:38:27 crc kubenswrapper[4871]: I1126 05:38:27.086106 4871 generic.go:334] "Generic (PLEG): container finished" podID="b459dc97-78c7-446f-bc31-6497ad7169f4" containerID="213db145123f2c37dbc7ff516e856114ec7826eeef41506e766e3484f29c933f" exitCode=0 Nov 26 05:38:27 crc kubenswrapper[4871]: I1126 05:38:27.086234 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnfdj" event={"ID":"b459dc97-78c7-446f-bc31-6497ad7169f4","Type":"ContainerDied","Data":"213db145123f2c37dbc7ff516e856114ec7826eeef41506e766e3484f29c933f"} Nov 26 05:38:27 crc kubenswrapper[4871]: I1126 05:38:27.086509 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnfdj" event={"ID":"b459dc97-78c7-446f-bc31-6497ad7169f4","Type":"ContainerStarted","Data":"2c07e4c20393af92b8e5c5049eeaca50c23a1132773a4b0892a3cd8f5f3af88a"} Nov 26 05:38:28 crc kubenswrapper[4871]: I1126 05:38:28.051670 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-69c6746fd5-pkb65" Nov 26 05:38:28 crc kubenswrapper[4871]: I1126 05:38:28.094002 4871 generic.go:334] "Generic (PLEG): container finished" podID="b459dc97-78c7-446f-bc31-6497ad7169f4" containerID="12e6b90bf409f6ea461d69c436792106e2d072358e905e6f345977639ca9d6f1" exitCode=0 Nov 26 05:38:28 crc kubenswrapper[4871]: I1126 05:38:28.094059 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnfdj" event={"ID":"b459dc97-78c7-446f-bc31-6497ad7169f4","Type":"ContainerDied","Data":"12e6b90bf409f6ea461d69c436792106e2d072358e905e6f345977639ca9d6f1"} Nov 26 05:38:29 crc kubenswrapper[4871]: I1126 05:38:29.101888 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnfdj" event={"ID":"b459dc97-78c7-446f-bc31-6497ad7169f4","Type":"ContainerStarted","Data":"609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b"} Nov 26 05:38:29 crc kubenswrapper[4871]: I1126 05:38:29.121159 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tnfdj" podStartSLOduration=2.652551345 podStartE2EDuration="4.121142001s" podCreationTimestamp="2025-11-26 05:38:25 +0000 UTC" 
firstStartedPulling="2025-11-26 05:38:27.087989349 +0000 UTC m=+765.271040955" lastFinishedPulling="2025-11-26 05:38:28.556580025 +0000 UTC m=+766.739631611" observedRunningTime="2025-11-26 05:38:29.121018458 +0000 UTC m=+767.304070054" watchObservedRunningTime="2025-11-26 05:38:29.121142001 +0000 UTC m=+767.304193587" Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.546473 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5t4ff"] Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.550108 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5t4ff" Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.560908 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5t4ff"] Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.698286 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-utilities\") pod \"redhat-operators-5t4ff\" (UID: \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") " pod="openshift-marketplace/redhat-operators-5t4ff" Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.698450 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-catalog-content\") pod \"redhat-operators-5t4ff\" (UID: \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") " pod="openshift-marketplace/redhat-operators-5t4ff" Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.698474 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn667\" (UniqueName: \"kubernetes.io/projected/6d6f7af6-100e-49ae-95e2-bfe8d4636814-kube-api-access-wn667\") pod \"redhat-operators-5t4ff\" (UID: \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") " pod="openshift-marketplace/redhat-operators-5t4ff" Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.799960 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-catalog-content\") pod \"redhat-operators-5t4ff\" (UID: \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") " pod="openshift-marketplace/redhat-operators-5t4ff" Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.800003 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wn667\" (UniqueName: \"kubernetes.io/projected/6d6f7af6-100e-49ae-95e2-bfe8d4636814-kube-api-access-wn667\") pod \"redhat-operators-5t4ff\" (UID: \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") " pod="openshift-marketplace/redhat-operators-5t4ff" Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.800066 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-utilities\") pod \"redhat-operators-5t4ff\" (UID: \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") " pod="openshift-marketplace/redhat-operators-5t4ff" Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.800466 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-catalog-content\") pod \"redhat-operators-5t4ff\" (UID: 
\"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") " pod="openshift-marketplace/redhat-operators-5t4ff" Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.800550 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-utilities\") pod \"redhat-operators-5t4ff\" (UID: \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") " pod="openshift-marketplace/redhat-operators-5t4ff" Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.829335 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wn667\" (UniqueName: \"kubernetes.io/projected/6d6f7af6-100e-49ae-95e2-bfe8d4636814-kube-api-access-wn667\") pod \"redhat-operators-5t4ff\" (UID: \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") " pod="openshift-marketplace/redhat-operators-5t4ff" Nov 26 05:38:32 crc kubenswrapper[4871]: I1126 05:38:32.886140 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5t4ff" Nov 26 05:38:33 crc kubenswrapper[4871]: I1126 05:38:33.423477 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5t4ff"] Nov 26 05:38:34 crc kubenswrapper[4871]: I1126 05:38:34.134271 4871 generic.go:334] "Generic (PLEG): container finished" podID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" containerID="340026a8223962b0270efb4e533d294ca0deeee59f796d53b060cec7df60f871" exitCode=0 Nov 26 05:38:34 crc kubenswrapper[4871]: I1126 05:38:34.134331 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5t4ff" event={"ID":"6d6f7af6-100e-49ae-95e2-bfe8d4636814","Type":"ContainerDied","Data":"340026a8223962b0270efb4e533d294ca0deeee59f796d53b060cec7df60f871"} Nov 26 05:38:34 crc kubenswrapper[4871]: I1126 05:38:34.134663 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5t4ff" event={"ID":"6d6f7af6-100e-49ae-95e2-bfe8d4636814","Type":"ContainerStarted","Data":"13bfe3409bafa114a936383aeba71b2471ac0fc2f41310aee3705f490d0e743a"} Nov 26 05:38:35 crc kubenswrapper[4871]: I1126 05:38:35.141924 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5t4ff" event={"ID":"6d6f7af6-100e-49ae-95e2-bfe8d4636814","Type":"ContainerStarted","Data":"ed947f7d52d0e11439ab186f1ead3037e28e086753994aa394274eb258ca2566"} Nov 26 05:38:35 crc kubenswrapper[4871]: I1126 05:38:35.746877 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:35 crc kubenswrapper[4871]: I1126 05:38:35.746973 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:35 crc kubenswrapper[4871]: I1126 05:38:35.788107 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:36 crc kubenswrapper[4871]: I1126 05:38:36.151598 4871 generic.go:334] "Generic (PLEG): container finished" podID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" containerID="ed947f7d52d0e11439ab186f1ead3037e28e086753994aa394274eb258ca2566" exitCode=0 Nov 26 05:38:36 crc kubenswrapper[4871]: I1126 05:38:36.151688 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5t4ff" 
event={"ID":"6d6f7af6-100e-49ae-95e2-bfe8d4636814","Type":"ContainerDied","Data":"ed947f7d52d0e11439ab186f1ead3037e28e086753994aa394274eb258ca2566"} Nov 26 05:38:36 crc kubenswrapper[4871]: I1126 05:38:36.229245 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:37 crc kubenswrapper[4871]: I1126 05:38:37.183552 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5t4ff" event={"ID":"6d6f7af6-100e-49ae-95e2-bfe8d4636814","Type":"ContainerStarted","Data":"3e2b5f5ca30c39e6cdd2733aa427941ae0df1ff9ad98665c8863a882ad480b07"} Nov 26 05:38:37 crc kubenswrapper[4871]: I1126 05:38:37.206801 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5t4ff" podStartSLOduration=2.788725375 podStartE2EDuration="5.206772655s" podCreationTimestamp="2025-11-26 05:38:32 +0000 UTC" firstStartedPulling="2025-11-26 05:38:34.136122127 +0000 UTC m=+772.319173713" lastFinishedPulling="2025-11-26 05:38:36.554169407 +0000 UTC m=+774.737220993" observedRunningTime="2025-11-26 05:38:37.204117158 +0000 UTC m=+775.387168774" watchObservedRunningTime="2025-11-26 05:38:37.206772655 +0000 UTC m=+775.389824311" Nov 26 05:38:39 crc kubenswrapper[4871]: I1126 05:38:39.173875 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tnfdj"] Nov 26 05:38:39 crc kubenswrapper[4871]: I1126 05:38:39.175351 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tnfdj" podUID="b459dc97-78c7-446f-bc31-6497ad7169f4" containerName="registry-server" containerID="cri-o://609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b" gracePeriod=2 Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.113541 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.196705 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-catalog-content\") pod \"b459dc97-78c7-446f-bc31-6497ad7169f4\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.196769 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56f7l\" (UniqueName: \"kubernetes.io/projected/b459dc97-78c7-446f-bc31-6497ad7169f4-kube-api-access-56f7l\") pod \"b459dc97-78c7-446f-bc31-6497ad7169f4\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.196789 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-utilities\") pod \"b459dc97-78c7-446f-bc31-6497ad7169f4\" (UID: \"b459dc97-78c7-446f-bc31-6497ad7169f4\") " Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.197911 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-utilities" (OuterVolumeSpecName: "utilities") pod "b459dc97-78c7-446f-bc31-6497ad7169f4" (UID: "b459dc97-78c7-446f-bc31-6497ad7169f4"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.202519 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b459dc97-78c7-446f-bc31-6497ad7169f4-kube-api-access-56f7l" (OuterVolumeSpecName: "kube-api-access-56f7l") pod "b459dc97-78c7-446f-bc31-6497ad7169f4" (UID: "b459dc97-78c7-446f-bc31-6497ad7169f4"). InnerVolumeSpecName "kube-api-access-56f7l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.207374 4871 generic.go:334] "Generic (PLEG): container finished" podID="b459dc97-78c7-446f-bc31-6497ad7169f4" containerID="609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b" exitCode=0 Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.207433 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnfdj" event={"ID":"b459dc97-78c7-446f-bc31-6497ad7169f4","Type":"ContainerDied","Data":"609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b"} Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.207462 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tnfdj" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.207479 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnfdj" event={"ID":"b459dc97-78c7-446f-bc31-6497ad7169f4","Type":"ContainerDied","Data":"2c07e4c20393af92b8e5c5049eeaca50c23a1132773a4b0892a3cd8f5f3af88a"} Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.207507 4871 scope.go:117] "RemoveContainer" containerID="609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.226677 4871 scope.go:117] "RemoveContainer" containerID="12e6b90bf409f6ea461d69c436792106e2d072358e905e6f345977639ca9d6f1" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.242362 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b459dc97-78c7-446f-bc31-6497ad7169f4" (UID: "b459dc97-78c7-446f-bc31-6497ad7169f4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.253903 4871 scope.go:117] "RemoveContainer" containerID="213db145123f2c37dbc7ff516e856114ec7826eeef41506e766e3484f29c933f" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.273547 4871 scope.go:117] "RemoveContainer" containerID="609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b" Nov 26 05:38:40 crc kubenswrapper[4871]: E1126 05:38:40.274108 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b\": container with ID starting with 609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b not found: ID does not exist" containerID="609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.274158 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b"} err="failed to get container status \"609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b\": rpc error: code = NotFound desc = could not find container \"609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b\": container with ID starting with 609f251c67a3935c0f9c366ac806286b94ed74089a9f8a4b0b258eecf9f0448b not found: ID does not exist" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.274197 4871 scope.go:117] "RemoveContainer" containerID="12e6b90bf409f6ea461d69c436792106e2d072358e905e6f345977639ca9d6f1" Nov 26 05:38:40 crc kubenswrapper[4871]: E1126 05:38:40.274512 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12e6b90bf409f6ea461d69c436792106e2d072358e905e6f345977639ca9d6f1\": container with ID starting with 12e6b90bf409f6ea461d69c436792106e2d072358e905e6f345977639ca9d6f1 not found: ID does not exist" containerID="12e6b90bf409f6ea461d69c436792106e2d072358e905e6f345977639ca9d6f1" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.274618 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12e6b90bf409f6ea461d69c436792106e2d072358e905e6f345977639ca9d6f1"} err="failed to get container status \"12e6b90bf409f6ea461d69c436792106e2d072358e905e6f345977639ca9d6f1\": rpc error: code = NotFound desc = could not find container \"12e6b90bf409f6ea461d69c436792106e2d072358e905e6f345977639ca9d6f1\": container with ID starting with 12e6b90bf409f6ea461d69c436792106e2d072358e905e6f345977639ca9d6f1 not found: ID does not exist" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.274696 4871 scope.go:117] "RemoveContainer" containerID="213db145123f2c37dbc7ff516e856114ec7826eeef41506e766e3484f29c933f" Nov 26 05:38:40 crc kubenswrapper[4871]: E1126 05:38:40.275244 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"213db145123f2c37dbc7ff516e856114ec7826eeef41506e766e3484f29c933f\": container with ID starting with 213db145123f2c37dbc7ff516e856114ec7826eeef41506e766e3484f29c933f not found: ID does not exist" containerID="213db145123f2c37dbc7ff516e856114ec7826eeef41506e766e3484f29c933f" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.275322 4871 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"213db145123f2c37dbc7ff516e856114ec7826eeef41506e766e3484f29c933f"} err="failed to get container status \"213db145123f2c37dbc7ff516e856114ec7826eeef41506e766e3484f29c933f\": rpc error: code = NotFound desc = could not find container \"213db145123f2c37dbc7ff516e856114ec7826eeef41506e766e3484f29c933f\": container with ID starting with 213db145123f2c37dbc7ff516e856114ec7826eeef41506e766e3484f29c933f not found: ID does not exist" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.298293 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.298389 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56f7l\" (UniqueName: \"kubernetes.io/projected/b459dc97-78c7-446f-bc31-6497ad7169f4-kube-api-access-56f7l\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.298447 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b459dc97-78c7-446f-bc31-6497ad7169f4-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.535398 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tnfdj"] Nov 26 05:38:40 crc kubenswrapper[4871]: I1126 05:38:40.541230 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tnfdj"] Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.586078 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2qmw5"] Nov 26 05:38:41 crc kubenswrapper[4871]: E1126 05:38:41.586780 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b459dc97-78c7-446f-bc31-6497ad7169f4" containerName="extract-utilities" Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.586802 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b459dc97-78c7-446f-bc31-6497ad7169f4" containerName="extract-utilities" Nov 26 05:38:41 crc kubenswrapper[4871]: E1126 05:38:41.586820 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b459dc97-78c7-446f-bc31-6497ad7169f4" containerName="extract-content" Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.586833 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b459dc97-78c7-446f-bc31-6497ad7169f4" containerName="extract-content" Nov 26 05:38:41 crc kubenswrapper[4871]: E1126 05:38:41.586856 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b459dc97-78c7-446f-bc31-6497ad7169f4" containerName="registry-server" Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.586869 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b459dc97-78c7-446f-bc31-6497ad7169f4" containerName="registry-server" Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.587062 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="b459dc97-78c7-446f-bc31-6497ad7169f4" containerName="registry-server" Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.591064 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.599400 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2qmw5"]
Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.718589 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvdcc\" (UniqueName: \"kubernetes.io/projected/dde8f1f0-4e60-4359-97bf-84b2aff794b3-kube-api-access-zvdcc\") pod \"redhat-marketplace-2qmw5\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") " pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.718659 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-utilities\") pod \"redhat-marketplace-2qmw5\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") " pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.718707 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-catalog-content\") pod \"redhat-marketplace-2qmw5\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") " pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.820408 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvdcc\" (UniqueName: \"kubernetes.io/projected/dde8f1f0-4e60-4359-97bf-84b2aff794b3-kube-api-access-zvdcc\") pod \"redhat-marketplace-2qmw5\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") " pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.820485 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-utilities\") pod \"redhat-marketplace-2qmw5\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") " pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.820514 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-catalog-content\") pod \"redhat-marketplace-2qmw5\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") " pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.821194 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-catalog-content\") pod \"redhat-marketplace-2qmw5\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") " pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.821497 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-utilities\") pod \"redhat-marketplace-2qmw5\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") " pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.858792 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvdcc\" (UniqueName: \"kubernetes.io/projected/dde8f1f0-4e60-4359-97bf-84b2aff794b3-kube-api-access-zvdcc\") pod \"redhat-marketplace-2qmw5\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") " pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:41 crc kubenswrapper[4871]: I1126 05:38:41.946954 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:42 crc kubenswrapper[4871]: I1126 05:38:42.363262 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2qmw5"]
Nov 26 05:38:42 crc kubenswrapper[4871]: W1126 05:38:42.368515 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddde8f1f0_4e60_4359_97bf_84b2aff794b3.slice/crio-6177aba753a7cca16a18f2bfff1be66ccc4ff2f9fd066c8449148dc1256aa52e WatchSource:0}: Error finding container 6177aba753a7cca16a18f2bfff1be66ccc4ff2f9fd066c8449148dc1256aa52e: Status 404 returned error can't find the container with id 6177aba753a7cca16a18f2bfff1be66ccc4ff2f9fd066c8449148dc1256aa52e
Nov 26 05:38:42 crc kubenswrapper[4871]: I1126 05:38:42.521286 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b459dc97-78c7-446f-bc31-6497ad7169f4" path="/var/lib/kubelet/pods/b459dc97-78c7-446f-bc31-6497ad7169f4/volumes"
Nov 26 05:38:42 crc kubenswrapper[4871]: I1126 05:38:42.886669 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5t4ff"
Nov 26 05:38:42 crc kubenswrapper[4871]: I1126 05:38:42.886713 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5t4ff"
Nov 26 05:38:42 crc kubenswrapper[4871]: I1126 05:38:42.955832 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5t4ff"
Nov 26 05:38:43 crc kubenswrapper[4871]: I1126 05:38:43.229302 4871 generic.go:334] "Generic (PLEG): container finished" podID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" containerID="1bc1c6fe1dfe1f75f17f01516178b908b82b259546fde8b573c98b386ef3f61a" exitCode=0
Nov 26 05:38:43 crc kubenswrapper[4871]: I1126 05:38:43.229343 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qmw5" event={"ID":"dde8f1f0-4e60-4359-97bf-84b2aff794b3","Type":"ContainerDied","Data":"1bc1c6fe1dfe1f75f17f01516178b908b82b259546fde8b573c98b386ef3f61a"}
Nov 26 05:38:43 crc kubenswrapper[4871]: I1126 05:38:43.230114 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qmw5" event={"ID":"dde8f1f0-4e60-4359-97bf-84b2aff794b3","Type":"ContainerStarted","Data":"6177aba753a7cca16a18f2bfff1be66ccc4ff2f9fd066c8449148dc1256aa52e"}
Nov 26 05:38:43 crc kubenswrapper[4871]: I1126 05:38:43.278772 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5t4ff"
Nov 26 05:38:44 crc kubenswrapper[4871]: I1126 05:38:44.243413 4871 generic.go:334] "Generic (PLEG): container finished" podID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" containerID="8024d773251298d8fd3bdd592dea20410891f3ea59f48f7c721189063574c48a" exitCode=0
Nov 26 05:38:44 crc kubenswrapper[4871]: I1126 05:38:44.243575 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qmw5" event={"ID":"dde8f1f0-4e60-4359-97bf-84b2aff794b3","Type":"ContainerDied","Data":"8024d773251298d8fd3bdd592dea20410891f3ea59f48f7c721189063574c48a"}
Nov 26 05:38:45 crc kubenswrapper[4871]: I1126 05:38:45.255340 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qmw5" event={"ID":"dde8f1f0-4e60-4359-97bf-84b2aff794b3","Type":"ContainerStarted","Data":"8397bd27c270817eb66b5f3313bc1305ba9b5ab4f15181c348bf5ccb2cf9bc9c"}
Nov 26 05:38:45 crc kubenswrapper[4871]: I1126 05:38:45.290736 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2qmw5" podStartSLOduration=2.897826981 podStartE2EDuration="4.290707137s" podCreationTimestamp="2025-11-26 05:38:41 +0000 UTC" firstStartedPulling="2025-11-26 05:38:43.231334027 +0000 UTC m=+781.414385653" lastFinishedPulling="2025-11-26 05:38:44.624214223 +0000 UTC m=+782.807265809" observedRunningTime="2025-11-26 05:38:45.285391975 +0000 UTC m=+783.468443581" watchObservedRunningTime="2025-11-26 05:38:45.290707137 +0000 UTC m=+783.473758763"
Nov 26 05:38:45 crc kubenswrapper[4871]: I1126 05:38:45.978161 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5t4ff"]
Nov 26 05:38:45 crc kubenswrapper[4871]: I1126 05:38:45.978966 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5t4ff" podUID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" containerName="registry-server" containerID="cri-o://3e2b5f5ca30c39e6cdd2733aa427941ae0df1ff9ad98665c8863a882ad480b07" gracePeriod=2
Nov 26 05:38:47 crc kubenswrapper[4871]: I1126 05:38:47.273137 4871 generic.go:334] "Generic (PLEG): container finished" podID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" containerID="3e2b5f5ca30c39e6cdd2733aa427941ae0df1ff9ad98665c8863a882ad480b07" exitCode=0
Nov 26 05:38:47 crc kubenswrapper[4871]: I1126 05:38:47.273249 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5t4ff" event={"ID":"6d6f7af6-100e-49ae-95e2-bfe8d4636814","Type":"ContainerDied","Data":"3e2b5f5ca30c39e6cdd2733aa427941ae0df1ff9ad98665c8863a882ad480b07"}
Nov 26 05:38:47 crc kubenswrapper[4871]: I1126 05:38:47.566769 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g"
Nov 26 05:38:47 crc kubenswrapper[4871]: I1126 05:38:47.787497 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5t4ff"
Nov 26 05:38:47 crc kubenswrapper[4871]: I1126 05:38:47.912565 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wn667\" (UniqueName: \"kubernetes.io/projected/6d6f7af6-100e-49ae-95e2-bfe8d4636814-kube-api-access-wn667\") pod \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\" (UID: \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") "
Nov 26 05:38:47 crc kubenswrapper[4871]: I1126 05:38:47.912711 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-catalog-content\") pod \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\" (UID: \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") "
Nov 26 05:38:47 crc kubenswrapper[4871]: I1126 05:38:47.912767 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-utilities\") pod \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\" (UID: \"6d6f7af6-100e-49ae-95e2-bfe8d4636814\") "
Nov 26 05:38:47 crc kubenswrapper[4871]: I1126 05:38:47.913890 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-utilities" (OuterVolumeSpecName: "utilities") pod "6d6f7af6-100e-49ae-95e2-bfe8d4636814" (UID: "6d6f7af6-100e-49ae-95e2-bfe8d4636814"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:38:47 crc kubenswrapper[4871]: I1126 05:38:47.918000 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d6f7af6-100e-49ae-95e2-bfe8d4636814-kube-api-access-wn667" (OuterVolumeSpecName: "kube-api-access-wn667") pod "6d6f7af6-100e-49ae-95e2-bfe8d4636814" (UID: "6d6f7af6-100e-49ae-95e2-bfe8d4636814"). InnerVolumeSpecName "kube-api-access-wn667". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.010693 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6d6f7af6-100e-49ae-95e2-bfe8d4636814" (UID: "6d6f7af6-100e-49ae-95e2-bfe8d4636814"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.013671 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.013694 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d6f7af6-100e-49ae-95e2-bfe8d4636814-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.013705 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wn667\" (UniqueName: \"kubernetes.io/projected/6d6f7af6-100e-49ae-95e2-bfe8d4636814-kube-api-access-wn667\") on node \"crc\" DevicePath \"\""
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.285597 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5t4ff" event={"ID":"6d6f7af6-100e-49ae-95e2-bfe8d4636814","Type":"ContainerDied","Data":"13bfe3409bafa114a936383aeba71b2471ac0fc2f41310aee3705f490d0e743a"}
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.285654 4871 scope.go:117] "RemoveContainer" containerID="3e2b5f5ca30c39e6cdd2733aa427941ae0df1ff9ad98665c8863a882ad480b07"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.285693 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5t4ff"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.311848 4871 scope.go:117] "RemoveContainer" containerID="ed947f7d52d0e11439ab186f1ead3037e28e086753994aa394274eb258ca2566"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.337952 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5t4ff"]
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.341614 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5t4ff"]
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.344850 4871 scope.go:117] "RemoveContainer" containerID="340026a8223962b0270efb4e533d294ca0deeee59f796d53b060cec7df60f871"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.461631 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"]
Nov 26 05:38:48 crc kubenswrapper[4871]: E1126 05:38:48.462821 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" containerName="extract-content"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.462845 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" containerName="extract-content"
Nov 26 05:38:48 crc kubenswrapper[4871]: E1126 05:38:48.462870 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" containerName="registry-server"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.462880 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" containerName="registry-server"
Nov 26 05:38:48 crc kubenswrapper[4871]: E1126 05:38:48.462910 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" containerName="extract-utilities"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.462924 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" containerName="extract-utilities"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.464121 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" containerName="registry-server"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.479280 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.482091 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.482335 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-nr8gb"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.485046 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-dzkmx"]
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.488173 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.491778 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"]
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.500823 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.500934 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.515495 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d6f7af6-100e-49ae-95e2-bfe8d4636814" path="/var/lib/kubelet/pods/6d6f7af6-100e-49ae-95e2-bfe8d4636814/volumes"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.565754 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-2nt4b"]
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.579663 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-n9ldw"]
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.579857 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.581312 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.581919 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-n9ldw"]
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.582736 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.582959 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.583091 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.583211 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-dj9cw"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.583389 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.623412 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-metrics\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.623484 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-reloader\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.623514 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6266x\" (UniqueName: \"kubernetes.io/projected/1e670c64-e309-46e6-bdb8-797f85aee3c9-kube-api-access-6266x\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.623566 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-frr-conf\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.623581 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5x5dr\" (UniqueName: \"kubernetes.io/projected/91a86765-1b7c-445b-8930-dc06e96fc752-kube-api-access-5x5dr\") pod \"frr-k8s-webhook-server-6998585d5-nnxbg\" (UID: \"91a86765-1b7c-445b-8930-dc06e96fc752\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.623602 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/1e670c64-e309-46e6-bdb8-797f85aee3c9-frr-startup\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.623623 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/91a86765-1b7c-445b-8930-dc06e96fc752-cert\") pod \"frr-k8s-webhook-server-6998585d5-nnxbg\" (UID: \"91a86765-1b7c-445b-8930-dc06e96fc752\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.623638 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1e670c64-e309-46e6-bdb8-797f85aee3c9-metrics-certs\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.623664 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-frr-sockets\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725082 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rcr5\" (UniqueName: \"kubernetes.io/projected/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-kube-api-access-7rcr5\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725168 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-metrics-certs\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725229 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/df243ac7-b567-4159-8103-103df0831280-cert\") pod \"controller-6c7b4b5f48-n9ldw\" (UID: \"df243ac7-b567-4159-8103-103df0831280\") " pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725271 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-reloader\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725302 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-metallb-excludel2\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725323 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df243ac7-b567-4159-8103-103df0831280-metrics-certs\") pod \"controller-6c7b4b5f48-n9ldw\" (UID: \"df243ac7-b567-4159-8103-103df0831280\") " pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725341 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-memberlist\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725358 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6266x\" (UniqueName: \"kubernetes.io/projected/1e670c64-e309-46e6-bdb8-797f85aee3c9-kube-api-access-6266x\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725386 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-frr-conf\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725412 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5x5dr\" (UniqueName: \"kubernetes.io/projected/91a86765-1b7c-445b-8930-dc06e96fc752-kube-api-access-5x5dr\") pod \"frr-k8s-webhook-server-6998585d5-nnxbg\" (UID: \"91a86765-1b7c-445b-8930-dc06e96fc752\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725513 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/1e670c64-e309-46e6-bdb8-797f85aee3c9-frr-startup\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725585 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/91a86765-1b7c-445b-8930-dc06e96fc752-cert\") pod \"frr-k8s-webhook-server-6998585d5-nnxbg\" (UID: \"91a86765-1b7c-445b-8930-dc06e96fc752\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725613 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1e670c64-e309-46e6-bdb8-797f85aee3c9-metrics-certs\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725633 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-frr-sockets\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725691 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfxmc\" (UniqueName: \"kubernetes.io/projected/df243ac7-b567-4159-8103-103df0831280-kube-api-access-vfxmc\") pod \"controller-6c7b4b5f48-n9ldw\" (UID: \"df243ac7-b567-4159-8103-103df0831280\") " pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725721 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-metrics\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725893 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-reloader\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.725947 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-frr-conf\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: E1126 05:38:48.726028 4871 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.726044 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-frr-sockets\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: E1126 05:38:48.726106 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1e670c64-e309-46e6-bdb8-797f85aee3c9-metrics-certs podName:1e670c64-e309-46e6-bdb8-797f85aee3c9 nodeName:}" failed. No retries permitted until 2025-11-26 05:38:49.226076876 +0000 UTC m=+787.409128502 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1e670c64-e309-46e6-bdb8-797f85aee3c9-metrics-certs") pod "frr-k8s-dzkmx" (UID: "1e670c64-e309-46e6-bdb8-797f85aee3c9") : secret "frr-k8s-certs-secret" not found
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.726218 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/1e670c64-e309-46e6-bdb8-797f85aee3c9-metrics\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.726575 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/1e670c64-e309-46e6-bdb8-797f85aee3c9-frr-startup\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.730634 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/91a86765-1b7c-445b-8930-dc06e96fc752-cert\") pod \"frr-k8s-webhook-server-6998585d5-nnxbg\" (UID: \"91a86765-1b7c-445b-8930-dc06e96fc752\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.749192 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5x5dr\" (UniqueName: \"kubernetes.io/projected/91a86765-1b7c-445b-8930-dc06e96fc752-kube-api-access-5x5dr\") pod \"frr-k8s-webhook-server-6998585d5-nnxbg\" (UID: \"91a86765-1b7c-445b-8930-dc06e96fc752\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.749279 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6266x\" (UniqueName: \"kubernetes.io/projected/1e670c64-e309-46e6-bdb8-797f85aee3c9-kube-api-access-6266x\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.812048 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.827037 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/df243ac7-b567-4159-8103-103df0831280-cert\") pod \"controller-6c7b4b5f48-n9ldw\" (UID: \"df243ac7-b567-4159-8103-103df0831280\") " pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.827084 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-metallb-excludel2\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.827114 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df243ac7-b567-4159-8103-103df0831280-metrics-certs\") pod \"controller-6c7b4b5f48-n9ldw\" (UID: \"df243ac7-b567-4159-8103-103df0831280\") " pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.827140 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-memberlist\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.827225 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfxmc\" (UniqueName: \"kubernetes.io/projected/df243ac7-b567-4159-8103-103df0831280-kube-api-access-vfxmc\") pod \"controller-6c7b4b5f48-n9ldw\" (UID: \"df243ac7-b567-4159-8103-103df0831280\") " pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.827260 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rcr5\" (UniqueName: \"kubernetes.io/projected/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-kube-api-access-7rcr5\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.827736 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-metrics-certs\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.828373 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-metallb-excludel2\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: E1126 05:38:48.829049 4871 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 26 05:38:48 crc kubenswrapper[4871]: E1126 05:38:48.829107 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-memberlist podName:e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b nodeName:}" failed. No retries permitted until 2025-11-26 05:38:49.329090935 +0000 UTC m=+787.512142531 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-memberlist") pod "speaker-2nt4b" (UID: "e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b") : secret "metallb-memberlist" not found
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.830737 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-metrics-certs\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.831379 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.835710 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/df243ac7-b567-4159-8103-103df0831280-metrics-certs\") pod \"controller-6c7b4b5f48-n9ldw\" (UID: \"df243ac7-b567-4159-8103-103df0831280\") " pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.840792 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/df243ac7-b567-4159-8103-103df0831280-cert\") pod \"controller-6c7b4b5f48-n9ldw\" (UID: \"df243ac7-b567-4159-8103-103df0831280\") " pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.857612 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rcr5\" (UniqueName: \"kubernetes.io/projected/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-kube-api-access-7rcr5\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.860323 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfxmc\" (UniqueName: \"kubernetes.io/projected/df243ac7-b567-4159-8103-103df0831280-kube-api-access-vfxmc\") pod \"controller-6c7b4b5f48-n9ldw\" (UID: \"df243ac7-b567-4159-8103-103df0831280\") " pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:48 crc kubenswrapper[4871]: I1126 05:38:48.910580 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:49 crc kubenswrapper[4871]: I1126 05:38:49.232202 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1e670c64-e309-46e6-bdb8-797f85aee3c9-metrics-certs\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:49 crc kubenswrapper[4871]: I1126 05:38:49.240412 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1e670c64-e309-46e6-bdb8-797f85aee3c9-metrics-certs\") pod \"frr-k8s-dzkmx\" (UID: \"1e670c64-e309-46e6-bdb8-797f85aee3c9\") " pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:49 crc kubenswrapper[4871]: I1126 05:38:49.291821 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"]
Nov 26 05:38:49 crc kubenswrapper[4871]: W1126 05:38:49.302191 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91a86765_1b7c_445b_8930_dc06e96fc752.slice/crio-446c1228ecc21f0023c4433400248794c138f97bed3198b894661c9b5a6e2397 WatchSource:0}: Error finding container 446c1228ecc21f0023c4433400248794c138f97bed3198b894661c9b5a6e2397: Status 404 returned error can't find the container with id 446c1228ecc21f0023c4433400248794c138f97bed3198b894661c9b5a6e2397
Nov 26 05:38:49 crc kubenswrapper[4871]: I1126 05:38:49.332939 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-memberlist\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:49 crc kubenswrapper[4871]: E1126 05:38:49.333136 4871 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 26 05:38:49 crc kubenswrapper[4871]: E1126 05:38:49.333184 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-memberlist podName:e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b nodeName:}" failed. No retries permitted until 2025-11-26 05:38:50.333168865 +0000 UTC m=+788.516220451 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-memberlist") pod "speaker-2nt4b" (UID: "e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b") : secret "metallb-memberlist" not found
Nov 26 05:38:49 crc kubenswrapper[4871]: I1126 05:38:49.352045 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-n9ldw"]
Nov 26 05:38:49 crc kubenswrapper[4871]: W1126 05:38:49.359929 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf243ac7_b567_4159_8103_103df0831280.slice/crio-ab41f5a96ad31c93e4d420f01b2a9c4eb5e902c2c024b4c57ad93c42667eb0ac WatchSource:0}: Error finding container ab41f5a96ad31c93e4d420f01b2a9c4eb5e902c2c024b4c57ad93c42667eb0ac: Status 404 returned error can't find the container with id ab41f5a96ad31c93e4d420f01b2a9c4eb5e902c2c024b4c57ad93c42667eb0ac
Nov 26 05:38:49 crc kubenswrapper[4871]: I1126 05:38:49.424129 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:38:50 crc kubenswrapper[4871]: I1126 05:38:50.307267 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg" event={"ID":"91a86765-1b7c-445b-8930-dc06e96fc752","Type":"ContainerStarted","Data":"446c1228ecc21f0023c4433400248794c138f97bed3198b894661c9b5a6e2397"}
Nov 26 05:38:50 crc kubenswrapper[4871]: I1126 05:38:50.308429 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dzkmx" event={"ID":"1e670c64-e309-46e6-bdb8-797f85aee3c9","Type":"ContainerStarted","Data":"e1efc0cd27b224c49100db54a28af879388e5f48ecee74be758628ff1caefbc2"}
Nov 26 05:38:50 crc kubenswrapper[4871]: I1126 05:38:50.310378 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-n9ldw" event={"ID":"df243ac7-b567-4159-8103-103df0831280","Type":"ContainerStarted","Data":"b559f9c5ff3e224aace8c45e359f32488d448f1bb04f4757e24c7f6092919159"}
Nov 26 05:38:50 crc kubenswrapper[4871]: I1126 05:38:50.310420 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-n9ldw" event={"ID":"df243ac7-b567-4159-8103-103df0831280","Type":"ContainerStarted","Data":"31ed02a887a2d6751f6cb856684fc04b66ddd712367361d4cf8b80c48317e9df"}
Nov 26 05:38:50 crc kubenswrapper[4871]: I1126 05:38:50.310429 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-n9ldw" event={"ID":"df243ac7-b567-4159-8103-103df0831280","Type":"ContainerStarted","Data":"ab41f5a96ad31c93e4d420f01b2a9c4eb5e902c2c024b4c57ad93c42667eb0ac"}
Nov 26 05:38:50 crc kubenswrapper[4871]: I1126 05:38:50.310513 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-n9ldw"
Nov 26 05:38:50 crc kubenswrapper[4871]: I1126 05:38:50.333563 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-n9ldw" podStartSLOduration=2.333539617 podStartE2EDuration="2.333539617s" podCreationTimestamp="2025-11-26 05:38:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:38:50.329075744 +0000 UTC m=+788.512127330" watchObservedRunningTime="2025-11-26 05:38:50.333539617 +0000 UTC m=+788.516591203"
Nov 26 05:38:50 crc kubenswrapper[4871]: I1126 05:38:50.346789 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-memberlist\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:50 crc kubenswrapper[4871]: I1126 05:38:50.353441 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b-memberlist\") pod \"speaker-2nt4b\" (UID: \"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b\") " pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:50 crc kubenswrapper[4871]: I1126 05:38:50.400732 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:51 crc kubenswrapper[4871]: I1126 05:38:51.320132 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2nt4b" event={"ID":"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b","Type":"ContainerStarted","Data":"7465cce6652ae3e6e783247704632ec7a9a93156e25232d57c49af618e2a7ccb"}
Nov 26 05:38:51 crc kubenswrapper[4871]: I1126 05:38:51.320187 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2nt4b" event={"ID":"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b","Type":"ContainerStarted","Data":"7af9b1e23a8fc2cdb8dd4cb76eb7e287a40f02a99f2fdde470e34e676d737983"}
Nov 26 05:38:51 crc kubenswrapper[4871]: I1126 05:38:51.320197 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2nt4b" event={"ID":"e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b","Type":"ContainerStarted","Data":"55d7a691e1a031005bb86ae9464104219426096044b3ad10f0333ea8504be73d"}
Nov 26 05:38:51 crc kubenswrapper[4871]: I1126 05:38:51.320350 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-2nt4b"
Nov 26 05:38:51 crc kubenswrapper[4871]: I1126 05:38:51.336318 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-2nt4b" podStartSLOduration=3.336295183 podStartE2EDuration="3.336295183s" podCreationTimestamp="2025-11-26 05:38:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:38:51.334021721 +0000 UTC m=+789.517073317" watchObservedRunningTime="2025-11-26 05:38:51.336295183 +0000 UTC m=+789.519346769"
Nov 26 05:38:51 crc kubenswrapper[4871]: I1126 05:38:51.947686 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:51 crc kubenswrapper[4871]: I1126 05:38:51.947733 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:52 crc kubenswrapper[4871]: I1126 05:38:52.030326 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:52 crc kubenswrapper[4871]: I1126 05:38:52.379408 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:52 crc kubenswrapper[4871]: I1126 05:38:52.421264 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2qmw5"]
Nov 26 05:38:54 crc kubenswrapper[4871]: I1126 05:38:54.337222 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2qmw5" podUID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" containerName="registry-server" containerID="cri-o://8397bd27c270817eb66b5f3313bc1305ba9b5ab4f15181c348bf5ccb2cf9bc9c" gracePeriod=2
Nov 26 05:38:55 crc kubenswrapper[4871]: I1126 05:38:55.345327 4871 generic.go:334] "Generic (PLEG): container finished" podID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" containerID="8397bd27c270817eb66b5f3313bc1305ba9b5ab4f15181c348bf5ccb2cf9bc9c" exitCode=0
Nov 26 05:38:55 crc kubenswrapper[4871]: I1126 05:38:55.345371 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qmw5" event={"ID":"dde8f1f0-4e60-4359-97bf-84b2aff794b3","Type":"ContainerDied","Data":"8397bd27c270817eb66b5f3313bc1305ba9b5ab4f15181c348bf5ccb2cf9bc9c"}
Nov 26 05:38:56 crc kubenswrapper[4871]: I1126 05:38:56.420169 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:56 crc kubenswrapper[4871]: I1126 05:38:56.534549 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-utilities\") pod \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") "
Nov 26 05:38:56 crc kubenswrapper[4871]: I1126 05:38:56.534657 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-catalog-content\") pod \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") "
Nov 26 05:38:56 crc kubenswrapper[4871]: I1126 05:38:56.534715 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvdcc\" (UniqueName: \"kubernetes.io/projected/dde8f1f0-4e60-4359-97bf-84b2aff794b3-kube-api-access-zvdcc\") pod \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\" (UID: \"dde8f1f0-4e60-4359-97bf-84b2aff794b3\") "
Nov 26 05:38:56 crc kubenswrapper[4871]: I1126 05:38:56.536603 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-utilities" (OuterVolumeSpecName: "utilities") pod "dde8f1f0-4e60-4359-97bf-84b2aff794b3" (UID: "dde8f1f0-4e60-4359-97bf-84b2aff794b3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:38:56 crc kubenswrapper[4871]: I1126 05:38:56.540741 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dde8f1f0-4e60-4359-97bf-84b2aff794b3-kube-api-access-zvdcc" (OuterVolumeSpecName: "kube-api-access-zvdcc") pod "dde8f1f0-4e60-4359-97bf-84b2aff794b3" (UID: "dde8f1f0-4e60-4359-97bf-84b2aff794b3"). InnerVolumeSpecName "kube-api-access-zvdcc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:38:56 crc kubenswrapper[4871]: I1126 05:38:56.550432 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dde8f1f0-4e60-4359-97bf-84b2aff794b3" (UID: "dde8f1f0-4e60-4359-97bf-84b2aff794b3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:38:56 crc kubenswrapper[4871]: I1126 05:38:56.636131 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 05:38:56 crc kubenswrapper[4871]: I1126 05:38:56.636178 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvdcc\" (UniqueName: \"kubernetes.io/projected/dde8f1f0-4e60-4359-97bf-84b2aff794b3-kube-api-access-zvdcc\") on node \"crc\" DevicePath \"\""
Nov 26 05:38:56 crc kubenswrapper[4871]: I1126 05:38:56.636193 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dde8f1f0-4e60-4359-97bf-84b2aff794b3-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.361013 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2qmw5"
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.361069 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qmw5" event={"ID":"dde8f1f0-4e60-4359-97bf-84b2aff794b3","Type":"ContainerDied","Data":"6177aba753a7cca16a18f2bfff1be66ccc4ff2f9fd066c8449148dc1256aa52e"}
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.361202 4871 scope.go:117] "RemoveContainer" containerID="8397bd27c270817eb66b5f3313bc1305ba9b5ab4f15181c348bf5ccb2cf9bc9c"
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.362992 4871 generic.go:334] "Generic (PLEG): container finished" podID="1e670c64-e309-46e6-bdb8-797f85aee3c9" containerID="16cdd8775f14f0b2b2e18c2f926f05378722b1ac46045f2636349f58d0354022" exitCode=0
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.363063 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dzkmx" event={"ID":"1e670c64-e309-46e6-bdb8-797f85aee3c9","Type":"ContainerDied","Data":"16cdd8775f14f0b2b2e18c2f926f05378722b1ac46045f2636349f58d0354022"}
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.368918 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg" event={"ID":"91a86765-1b7c-445b-8930-dc06e96fc752","Type":"ContainerStarted","Data":"89bd982ae5ff568813f1da4e14ebf7026cc6c95c4c5f40c188dd13363013e9a3"}
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.369154 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg"
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.401103 4871 scope.go:117] "RemoveContainer" containerID="8024d773251298d8fd3bdd592dea20410891f3ea59f48f7c721189063574c48a"
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.438558 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg" podStartSLOduration=2.555302916 podStartE2EDuration="9.4385195s" podCreationTimestamp="2025-11-26 05:38:48 +0000 UTC" firstStartedPulling="2025-11-26 05:38:49.305716584 +0000 UTC m=+787.488768170" lastFinishedPulling="2025-11-26 05:38:56.188933148 +0000 UTC m=+794.371984754" observedRunningTime="2025-11-26 05:38:57.431728234 +0000 UTC m=+795.614779950" watchObservedRunningTime="2025-11-26 05:38:57.4385195 +0000 UTC m=+795.621571096"
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.457723 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2qmw5"]
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.463853 4871 scope.go:117] "RemoveContainer" containerID="1bc1c6fe1dfe1f75f17f01516178b908b82b259546fde8b573c98b386ef3f61a"
Nov 26 05:38:57 crc kubenswrapper[4871]: I1126 05:38:57.464278 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2qmw5"]
Nov 26 05:38:58 crc kubenswrapper[4871]: I1126 05:38:58.378979 4871 generic.go:334] "Generic (PLEG): container finished" podID="1e670c64-e309-46e6-bdb8-797f85aee3c9" containerID="dcedd1a3c272e86c380e95f7e5274719d5c73cfa48e6aec67da409becdb76ed3" exitCode=0
Nov 26 05:38:58 crc kubenswrapper[4871]: I1126 05:38:58.379043 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dzkmx" event={"ID":"1e670c64-e309-46e6-bdb8-797f85aee3c9","Type":"ContainerDied","Data":"dcedd1a3c272e86c380e95f7e5274719d5c73cfa48e6aec67da409becdb76ed3"}
Nov 26 05:38:58 crc kubenswrapper[4871]: I1126 05:38:58.516077 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" path="/var/lib/kubelet/pods/dde8f1f0-4e60-4359-97bf-84b2aff794b3/volumes"
Nov 26 05:38:59 crc kubenswrapper[4871]: I1126 05:38:59.405825 4871 generic.go:334] "Generic (PLEG): container finished" podID="1e670c64-e309-46e6-bdb8-797f85aee3c9" containerID="385a30fe3953335b2caeb4477882ecf994374564cccc1d15170ef2d05b683f19" exitCode=0
Nov 26 05:38:59 crc kubenswrapper[4871]: I1126 05:38:59.405900 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dzkmx" event={"ID":"1e670c64-e309-46e6-bdb8-797f85aee3c9","Type":"ContainerDied","Data":"385a30fe3953335b2caeb4477882ecf994374564cccc1d15170ef2d05b683f19"}
Nov 26 05:39:00 crc kubenswrapper[4871]: I1126 05:39:00.406844 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-2nt4b"
Nov 26 05:39:00 crc kubenswrapper[4871]: I1126 05:39:00.421394 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dzkmx" event={"ID":"1e670c64-e309-46e6-bdb8-797f85aee3c9","Type":"ContainerStarted","Data":"141b6f3c555a4f510a8da984d7941b2f5beacb81bd7be25ae10cffdb93cfe6c1"}
Nov 26 05:39:00 crc kubenswrapper[4871]: I1126 05:39:00.421453 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dzkmx" event={"ID":"1e670c64-e309-46e6-bdb8-797f85aee3c9","Type":"ContainerStarted","Data":"4728a83b3172c28dc69b9a9fa295efa3714159a6751e9fbce83f4f47361a8fe4"}
Nov 26 05:39:00 crc kubenswrapper[4871]: I1126 05:39:00.421472 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dzkmx" event={"ID":"1e670c64-e309-46e6-bdb8-797f85aee3c9","Type":"ContainerStarted","Data":"10f59208f570bd9bddd1af991f551bcaca5356eff45dbf1451905dd2c22c816e"}
Nov 26 05:39:00 crc kubenswrapper[4871]: I1126 05:39:00.421490 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dzkmx" event={"ID":"1e670c64-e309-46e6-bdb8-797f85aee3c9","Type":"ContainerStarted","Data":"a43ec5221c098248d54046a5808054b421236bafdde9c9ab0c2d32bd36ccab97"}
Nov 26 05:39:01 crc kubenswrapper[4871]: I1126 05:39:01.430709 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dzkmx" event={"ID":"1e670c64-e309-46e6-bdb8-797f85aee3c9","Type":"ContainerStarted","Data":"3ef863b3b3be02be7fb5b3bf183eba1971bae12aaae2a794619fd949cfe0290b"}
Nov 26 05:39:01 crc kubenswrapper[4871]: I1126 05:39:01.430981 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-dzkmx" event={"ID":"1e670c64-e309-46e6-bdb8-797f85aee3c9","Type":"ContainerStarted","Data":"49100c9f62bbab6a00975881ded23e36eb9698c18036286379766b760434d15f"}
Nov 26 05:39:01 crc kubenswrapper[4871]: I1126 05:39:01.430997 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:39:01 crc kubenswrapper[4871]: I1126 05:39:01.452072 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-dzkmx" podStartSLOduration=6.753851492 podStartE2EDuration="13.452052252s" podCreationTimestamp="2025-11-26 05:38:48 +0000 UTC" firstStartedPulling="2025-11-26 05:38:49.537746959 +0000 UTC m=+787.720798545" lastFinishedPulling="2025-11-26 05:38:56.235947709 +0000 UTC m=+794.418999305" observedRunningTime="2025-11-26 05:39:01.449180486 +0000 UTC m=+799.632232092" watchObservedRunningTime="2025-11-26 05:39:01.452052252 +0000 UTC m=+799.635103848"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.194793 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-vqzv8"]
Nov 26 05:39:03 crc kubenswrapper[4871]: E1126 05:39:03.195055 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" containerName="extract-content"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.195068 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" containerName="extract-content"
Nov 26 05:39:03 crc kubenswrapper[4871]: E1126 05:39:03.195080 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" containerName="registry-server"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.195086 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" containerName="registry-server"
Nov 26 05:39:03 crc kubenswrapper[4871]: E1126 05:39:03.195095 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" containerName="extract-utilities"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.195102 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" containerName="extract-utilities"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.195217 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="dde8f1f0-4e60-4359-97bf-84b2aff794b3" containerName="registry-server"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.195614 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vqzv8"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.206117 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.206268 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.209651 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-ngt4p"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.212644 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vqzv8"]
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.335391 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcpxl\" (UniqueName: \"kubernetes.io/projected/53789852-3d44-44be-ba18-a3964336c005-kube-api-access-xcpxl\") pod \"openstack-operator-index-vqzv8\" (UID: \"53789852-3d44-44be-ba18-a3964336c005\") " pod="openstack-operators/openstack-operator-index-vqzv8"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.437226 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcpxl\" (UniqueName: \"kubernetes.io/projected/53789852-3d44-44be-ba18-a3964336c005-kube-api-access-xcpxl\") pod \"openstack-operator-index-vqzv8\" (UID: \"53789852-3d44-44be-ba18-a3964336c005\") " pod="openstack-operators/openstack-operator-index-vqzv8"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.455770 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcpxl\" (UniqueName: \"kubernetes.io/projected/53789852-3d44-44be-ba18-a3964336c005-kube-api-access-xcpxl\") pod \"openstack-operator-index-vqzv8\" (UID: \"53789852-3d44-44be-ba18-a3964336c005\") " pod="openstack-operators/openstack-operator-index-vqzv8"
Nov 26 05:39:03 crc kubenswrapper[4871]: I1126 05:39:03.534295 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vqzv8"
Nov 26 05:39:04 crc kubenswrapper[4871]: I1126 05:39:04.002349 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-vqzv8"]
Nov 26 05:39:04 crc kubenswrapper[4871]: I1126 05:39:04.425199 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:39:04 crc kubenswrapper[4871]: I1126 05:39:04.454136 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vqzv8" event={"ID":"53789852-3d44-44be-ba18-a3964336c005","Type":"ContainerStarted","Data":"3f9b73397e85a165eb534fd3e53774b8e54c0fe0b89ef606023e02cbdcf40509"}
Nov 26 05:39:04 crc kubenswrapper[4871]: I1126 05:39:04.475200 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-dzkmx"
Nov 26 05:39:05 crc kubenswrapper[4871]: I1126 05:39:05.463870 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vqzv8" event={"ID":"53789852-3d44-44be-ba18-a3964336c005","Type":"ContainerStarted","Data":"d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161"}
Nov 26 05:39:05 crc kubenswrapper[4871]: I1126 05:39:05.478423 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-vqzv8" podStartSLOduration=1.705294443 podStartE2EDuration="2.478406029s" podCreationTimestamp="2025-11-26 05:39:03 +0000 UTC" firstStartedPulling="2025-11-26 05:39:04.016856404 +0000 UTC m=+802.199908050" lastFinishedPulling="2025-11-26 05:39:04.78996804 +0000 UTC m=+802.973019636" observedRunningTime="2025-11-26 05:39:05.477311404 +0000 UTC m=+803.660362990" watchObservedRunningTime="2025-11-26 05:39:05.478406029 +0000 UTC m=+803.661457615"
Nov 26 05:39:06 crc kubenswrapper[4871]: I1126 05:39:06.555725 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-vqzv8"]
Nov 26 05:39:07 crc kubenswrapper[4871]: I1126 05:39:07.157816 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-pwvh2"]
Nov 26 05:39:07 crc kubenswrapper[4871]: I1126 05:39:07.159479 4871 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack-operators/openstack-operator-index-pwvh2" Nov 26 05:39:07 crc kubenswrapper[4871]: I1126 05:39:07.177083 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pwvh2"] Nov 26 05:39:07 crc kubenswrapper[4871]: I1126 05:39:07.302279 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xpp9\" (UniqueName: \"kubernetes.io/projected/87895915-b98b-423d-b00c-9dd92656f1a8-kube-api-access-6xpp9\") pod \"openstack-operator-index-pwvh2\" (UID: \"87895915-b98b-423d-b00c-9dd92656f1a8\") " pod="openstack-operators/openstack-operator-index-pwvh2" Nov 26 05:39:07 crc kubenswrapper[4871]: I1126 05:39:07.403955 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xpp9\" (UniqueName: \"kubernetes.io/projected/87895915-b98b-423d-b00c-9dd92656f1a8-kube-api-access-6xpp9\") pod \"openstack-operator-index-pwvh2\" (UID: \"87895915-b98b-423d-b00c-9dd92656f1a8\") " pod="openstack-operators/openstack-operator-index-pwvh2" Nov 26 05:39:07 crc kubenswrapper[4871]: I1126 05:39:07.433576 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xpp9\" (UniqueName: \"kubernetes.io/projected/87895915-b98b-423d-b00c-9dd92656f1a8-kube-api-access-6xpp9\") pod \"openstack-operator-index-pwvh2\" (UID: \"87895915-b98b-423d-b00c-9dd92656f1a8\") " pod="openstack-operators/openstack-operator-index-pwvh2" Nov 26 05:39:07 crc kubenswrapper[4871]: I1126 05:39:07.479838 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-vqzv8" podUID="53789852-3d44-44be-ba18-a3964336c005" containerName="registry-server" containerID="cri-o://d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161" gracePeriod=2 Nov 26 05:39:07 crc kubenswrapper[4871]: I1126 05:39:07.492583 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pwvh2" Nov 26 05:39:07 crc kubenswrapper[4871]: I1126 05:39:07.965515 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pwvh2"] Nov 26 05:39:07 crc kubenswrapper[4871]: I1126 05:39:07.978573 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vqzv8" Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.112768 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcpxl\" (UniqueName: \"kubernetes.io/projected/53789852-3d44-44be-ba18-a3964336c005-kube-api-access-xcpxl\") pod \"53789852-3d44-44be-ba18-a3964336c005\" (UID: \"53789852-3d44-44be-ba18-a3964336c005\") " Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.118328 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53789852-3d44-44be-ba18-a3964336c005-kube-api-access-xcpxl" (OuterVolumeSpecName: "kube-api-access-xcpxl") pod "53789852-3d44-44be-ba18-a3964336c005" (UID: "53789852-3d44-44be-ba18-a3964336c005"). InnerVolumeSpecName "kube-api-access-xcpxl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.214058 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcpxl\" (UniqueName: \"kubernetes.io/projected/53789852-3d44-44be-ba18-a3964336c005-kube-api-access-xcpxl\") on node \"crc\" DevicePath \"\"" Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.492427 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pwvh2" event={"ID":"87895915-b98b-423d-b00c-9dd92656f1a8","Type":"ContainerStarted","Data":"04d7f313e8765bba4b3490ab0a019dfee58d637d6277aba7301fbadc8996384d"} Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.495034 4871 generic.go:334] "Generic (PLEG): container finished" podID="53789852-3d44-44be-ba18-a3964336c005" containerID="d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161" exitCode=0 Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.495097 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vqzv8" event={"ID":"53789852-3d44-44be-ba18-a3964336c005","Type":"ContainerDied","Data":"d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161"} Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.495111 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-vqzv8" Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.495148 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-vqzv8" event={"ID":"53789852-3d44-44be-ba18-a3964336c005","Type":"ContainerDied","Data":"3f9b73397e85a165eb534fd3e53774b8e54c0fe0b89ef606023e02cbdcf40509"} Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.495176 4871 scope.go:117] "RemoveContainer" containerID="d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161" Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.528611 4871 scope.go:117] "RemoveContainer" containerID="d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161" Nov 26 05:39:08 crc kubenswrapper[4871]: E1126 05:39:08.529269 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161\": container with ID starting with d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161 not found: ID does not exist" containerID="d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161" Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.529343 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161"} err="failed to get container status \"d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161\": rpc error: code = NotFound desc = could not find container \"d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161\": container with ID starting with d42e588f717bf0015f709053be53318b49258c056ddb8f40ca225a508d26d161 not found: ID does not exist" Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.557279 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-vqzv8"] Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.563904 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-vqzv8"] Nov 26 05:39:08 
crc kubenswrapper[4871]: I1126 05:39:08.820829 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-nnxbg" Nov 26 05:39:08 crc kubenswrapper[4871]: I1126 05:39:08.916633 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-n9ldw" Nov 26 05:39:09 crc kubenswrapper[4871]: I1126 05:39:09.427218 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-dzkmx" Nov 26 05:39:09 crc kubenswrapper[4871]: I1126 05:39:09.505099 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pwvh2" event={"ID":"87895915-b98b-423d-b00c-9dd92656f1a8","Type":"ContainerStarted","Data":"7bb7bc69329e335ac7c9415f040087c87abf7f86a779a00d023bbf027376a8f8"} Nov 26 05:39:09 crc kubenswrapper[4871]: I1126 05:39:09.531909 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-pwvh2" podStartSLOduration=2.092748602 podStartE2EDuration="2.531881839s" podCreationTimestamp="2025-11-26 05:39:07 +0000 UTC" firstStartedPulling="2025-11-26 05:39:07.979211469 +0000 UTC m=+806.162263065" lastFinishedPulling="2025-11-26 05:39:08.418344676 +0000 UTC m=+806.601396302" observedRunningTime="2025-11-26 05:39:09.524835547 +0000 UTC m=+807.707887203" watchObservedRunningTime="2025-11-26 05:39:09.531881839 +0000 UTC m=+807.714933465" Nov 26 05:39:10 crc kubenswrapper[4871]: I1126 05:39:10.514757 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53789852-3d44-44be-ba18-a3964336c005" path="/var/lib/kubelet/pods/53789852-3d44-44be-ba18-a3964336c005/volumes" Nov 26 05:39:17 crc kubenswrapper[4871]: I1126 05:39:17.492992 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-pwvh2" Nov 26 05:39:17 crc kubenswrapper[4871]: I1126 05:39:17.493707 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-pwvh2" Nov 26 05:39:17 crc kubenswrapper[4871]: I1126 05:39:17.537817 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-pwvh2" Nov 26 05:39:17 crc kubenswrapper[4871]: I1126 05:39:17.610911 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-pwvh2" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.383621 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-khc9n"] Nov 26 05:39:24 crc kubenswrapper[4871]: E1126 05:39:24.384375 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53789852-3d44-44be-ba18-a3964336c005" containerName="registry-server" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.384398 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="53789852-3d44-44be-ba18-a3964336c005" containerName="registry-server" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.384683 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="53789852-3d44-44be-ba18-a3964336c005" containerName="registry-server" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.390485 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.398480 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-khc9n"] Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.457309 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-catalog-content\") pod \"certified-operators-khc9n\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.457371 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-utilities\") pod \"certified-operators-khc9n\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.457458 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4zpg\" (UniqueName: \"kubernetes.io/projected/2ae96304-ccbd-4621-85e9-34789c3a1afe-kube-api-access-l4zpg\") pod \"certified-operators-khc9n\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.559295 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-utilities\") pod \"certified-operators-khc9n\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.559379 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4zpg\" (UniqueName: \"kubernetes.io/projected/2ae96304-ccbd-4621-85e9-34789c3a1afe-kube-api-access-l4zpg\") pod \"certified-operators-khc9n\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.559511 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-catalog-content\") pod \"certified-operators-khc9n\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.559845 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-utilities\") pod \"certified-operators-khc9n\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.559996 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-catalog-content\") pod \"certified-operators-khc9n\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.581748 4871 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-l4zpg\" (UniqueName: \"kubernetes.io/projected/2ae96304-ccbd-4621-85e9-34789c3a1afe-kube-api-access-l4zpg\") pod \"certified-operators-khc9n\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:24 crc kubenswrapper[4871]: I1126 05:39:24.744906 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.007322 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b"] Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.008731 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.017185 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-8dhkf" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.017867 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b"] Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.066118 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-bundle\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b\" (UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.066195 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p57qt\" (UniqueName: \"kubernetes.io/projected/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-kube-api-access-p57qt\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b\" (UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.066230 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-util\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b\" (UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.167759 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p57qt\" (UniqueName: \"kubernetes.io/projected/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-kube-api-access-p57qt\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b\" (UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.167821 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-util\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b\" 
(UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.167863 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-bundle\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b\" (UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.168281 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-util\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b\" (UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.168291 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-bundle\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b\" (UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.172321 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-khc9n"] Nov 26 05:39:25 crc kubenswrapper[4871]: W1126 05:39:25.183701 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2ae96304_ccbd_4621_85e9_34789c3a1afe.slice/crio-eb0294ebc5c2df0d6417aa34169487100e12a5005b0d4adab7e592cb3cbfee0f WatchSource:0}: Error finding container eb0294ebc5c2df0d6417aa34169487100e12a5005b0d4adab7e592cb3cbfee0f: Status 404 returned error can't find the container with id eb0294ebc5c2df0d6417aa34169487100e12a5005b0d4adab7e592cb3cbfee0f Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.189241 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p57qt\" (UniqueName: \"kubernetes.io/projected/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-kube-api-access-p57qt\") pod \"a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b\" (UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.327688 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.632734 4871 generic.go:334] "Generic (PLEG): container finished" podID="2ae96304-ccbd-4621-85e9-34789c3a1afe" containerID="a1e5f96b383eeab55f99088e0e56661478f9c0587fa58317a321f2c436d682dd" exitCode=0 Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.632783 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khc9n" event={"ID":"2ae96304-ccbd-4621-85e9-34789c3a1afe","Type":"ContainerDied","Data":"a1e5f96b383eeab55f99088e0e56661478f9c0587fa58317a321f2c436d682dd"} Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.632813 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khc9n" event={"ID":"2ae96304-ccbd-4621-85e9-34789c3a1afe","Type":"ContainerStarted","Data":"eb0294ebc5c2df0d6417aa34169487100e12a5005b0d4adab7e592cb3cbfee0f"} Nov 26 05:39:25 crc kubenswrapper[4871]: I1126 05:39:25.735752 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b"] Nov 26 05:39:25 crc kubenswrapper[4871]: W1126 05:39:25.749799 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a991e15_5da4_457c_95b8_64e0ba0b7f0c.slice/crio-0bf8e2bfb20622eb6dfd8bbe90dc5e42e8e0d8cad5afd01e3367705a4d36bf38 WatchSource:0}: Error finding container 0bf8e2bfb20622eb6dfd8bbe90dc5e42e8e0d8cad5afd01e3367705a4d36bf38: Status 404 returned error can't find the container with id 0bf8e2bfb20622eb6dfd8bbe90dc5e42e8e0d8cad5afd01e3367705a4d36bf38 Nov 26 05:39:26 crc kubenswrapper[4871]: I1126 05:39:26.643768 4871 generic.go:334] "Generic (PLEG): container finished" podID="2a991e15-5da4-457c-95b8-64e0ba0b7f0c" containerID="0ad18fdb9bdc5e586450709193259209a32f4b1fa33416ef07ee12ed1445e7c5" exitCode=0 Nov 26 05:39:26 crc kubenswrapper[4871]: I1126 05:39:26.643928 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" event={"ID":"2a991e15-5da4-457c-95b8-64e0ba0b7f0c","Type":"ContainerDied","Data":"0ad18fdb9bdc5e586450709193259209a32f4b1fa33416ef07ee12ed1445e7c5"} Nov 26 05:39:26 crc kubenswrapper[4871]: I1126 05:39:26.645063 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" event={"ID":"2a991e15-5da4-457c-95b8-64e0ba0b7f0c","Type":"ContainerStarted","Data":"0bf8e2bfb20622eb6dfd8bbe90dc5e42e8e0d8cad5afd01e3367705a4d36bf38"} Nov 26 05:39:26 crc kubenswrapper[4871]: I1126 05:39:26.646996 4871 generic.go:334] "Generic (PLEG): container finished" podID="2ae96304-ccbd-4621-85e9-34789c3a1afe" containerID="c4ccb902c8c6ce1f192462127d7926e272cd23720ca392c048bf8e9ac595ee5b" exitCode=0 Nov 26 05:39:26 crc kubenswrapper[4871]: I1126 05:39:26.647037 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khc9n" event={"ID":"2ae96304-ccbd-4621-85e9-34789c3a1afe","Type":"ContainerDied","Data":"c4ccb902c8c6ce1f192462127d7926e272cd23720ca392c048bf8e9ac595ee5b"} Nov 26 05:39:27 crc kubenswrapper[4871]: I1126 05:39:27.655689 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khc9n" 
event={"ID":"2ae96304-ccbd-4621-85e9-34789c3a1afe","Type":"ContainerStarted","Data":"6143a09edcabd5bf463b9eebe61d1808c93755c42bcaa6709cef8c97b3adcdd7"} Nov 26 05:39:27 crc kubenswrapper[4871]: I1126 05:39:27.657775 4871 generic.go:334] "Generic (PLEG): container finished" podID="2a991e15-5da4-457c-95b8-64e0ba0b7f0c" containerID="d9322392b1b92609f7685bff1d3cd7477e05104fb373055b2bb1b7274e99656e" exitCode=0 Nov 26 05:39:27 crc kubenswrapper[4871]: I1126 05:39:27.657820 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" event={"ID":"2a991e15-5da4-457c-95b8-64e0ba0b7f0c","Type":"ContainerDied","Data":"d9322392b1b92609f7685bff1d3cd7477e05104fb373055b2bb1b7274e99656e"} Nov 26 05:39:27 crc kubenswrapper[4871]: I1126 05:39:27.722366 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-khc9n" podStartSLOduration=2.3004255750000002 podStartE2EDuration="3.722349178s" podCreationTimestamp="2025-11-26 05:39:24 +0000 UTC" firstStartedPulling="2025-11-26 05:39:25.634817041 +0000 UTC m=+823.817868657" lastFinishedPulling="2025-11-26 05:39:27.056740674 +0000 UTC m=+825.239792260" observedRunningTime="2025-11-26 05:39:27.688418738 +0000 UTC m=+825.871470324" watchObservedRunningTime="2025-11-26 05:39:27.722349178 +0000 UTC m=+825.905400764" Nov 26 05:39:28 crc kubenswrapper[4871]: I1126 05:39:28.668447 4871 generic.go:334] "Generic (PLEG): container finished" podID="2a991e15-5da4-457c-95b8-64e0ba0b7f0c" containerID="511b60f48b840ab02c7e9319e561dd046d3cff89ba27dd8c2845ec456017a12f" exitCode=0 Nov 26 05:39:28 crc kubenswrapper[4871]: I1126 05:39:28.668596 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" event={"ID":"2a991e15-5da4-457c-95b8-64e0ba0b7f0c","Type":"ContainerDied","Data":"511b60f48b840ab02c7e9319e561dd046d3cff89ba27dd8c2845ec456017a12f"} Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.083968 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.141277 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-bundle\") pod \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\" (UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.141351 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p57qt\" (UniqueName: \"kubernetes.io/projected/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-kube-api-access-p57qt\") pod \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\" (UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.141377 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-util\") pod \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\" (UID: \"2a991e15-5da4-457c-95b8-64e0ba0b7f0c\") " Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.142246 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-bundle" (OuterVolumeSpecName: "bundle") pod "2a991e15-5da4-457c-95b8-64e0ba0b7f0c" (UID: "2a991e15-5da4-457c-95b8-64e0ba0b7f0c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.147774 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-kube-api-access-p57qt" (OuterVolumeSpecName: "kube-api-access-p57qt") pod "2a991e15-5da4-457c-95b8-64e0ba0b7f0c" (UID: "2a991e15-5da4-457c-95b8-64e0ba0b7f0c"). InnerVolumeSpecName "kube-api-access-p57qt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.161592 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-util" (OuterVolumeSpecName: "util") pod "2a991e15-5da4-457c-95b8-64e0ba0b7f0c" (UID: "2a991e15-5da4-457c-95b8-64e0ba0b7f0c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.243811 4871 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.243863 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p57qt\" (UniqueName: \"kubernetes.io/projected/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-kube-api-access-p57qt\") on node \"crc\" DevicePath \"\"" Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.243891 4871 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2a991e15-5da4-457c-95b8-64e0ba0b7f0c-util\") on node \"crc\" DevicePath \"\"" Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.689950 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" event={"ID":"2a991e15-5da4-457c-95b8-64e0ba0b7f0c","Type":"ContainerDied","Data":"0bf8e2bfb20622eb6dfd8bbe90dc5e42e8e0d8cad5afd01e3367705a4d36bf38"} Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.690569 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0bf8e2bfb20622eb6dfd8bbe90dc5e42e8e0d8cad5afd01e3367705a4d36bf38" Nov 26 05:39:30 crc kubenswrapper[4871]: I1126 05:39:30.690007 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b" Nov 26 05:39:34 crc kubenswrapper[4871]: I1126 05:39:34.745388 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:34 crc kubenswrapper[4871]: I1126 05:39:34.745759 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:34 crc kubenswrapper[4871]: I1126 05:39:34.813560 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:34 crc kubenswrapper[4871]: I1126 05:39:34.935257 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px"] Nov 26 05:39:34 crc kubenswrapper[4871]: E1126 05:39:34.935579 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a991e15-5da4-457c-95b8-64e0ba0b7f0c" containerName="util" Nov 26 05:39:34 crc kubenswrapper[4871]: I1126 05:39:34.935593 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a991e15-5da4-457c-95b8-64e0ba0b7f0c" containerName="util" Nov 26 05:39:34 crc kubenswrapper[4871]: E1126 05:39:34.935619 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a991e15-5da4-457c-95b8-64e0ba0b7f0c" containerName="pull" Nov 26 05:39:34 crc kubenswrapper[4871]: I1126 05:39:34.935629 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a991e15-5da4-457c-95b8-64e0ba0b7f0c" containerName="pull" Nov 26 05:39:34 crc kubenswrapper[4871]: E1126 05:39:34.935640 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a991e15-5da4-457c-95b8-64e0ba0b7f0c" containerName="extract" Nov 26 05:39:34 crc kubenswrapper[4871]: I1126 05:39:34.935647 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a991e15-5da4-457c-95b8-64e0ba0b7f0c" containerName="extract" Nov 26 
05:39:34 crc kubenswrapper[4871]: I1126 05:39:34.935812 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a991e15-5da4-457c-95b8-64e0ba0b7f0c" containerName="extract" Nov 26 05:39:34 crc kubenswrapper[4871]: I1126 05:39:34.936293 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" Nov 26 05:39:34 crc kubenswrapper[4871]: I1126 05:39:34.938217 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-hswlh" Nov 26 05:39:34 crc kubenswrapper[4871]: I1126 05:39:34.955698 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px"] Nov 26 05:39:35 crc kubenswrapper[4871]: I1126 05:39:35.023109 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kf9rw\" (UniqueName: \"kubernetes.io/projected/d78961c7-c9ff-4550-bf75-add0fcef53fe-kube-api-access-kf9rw\") pod \"openstack-operator-controller-operator-5675dd9766-bp9px\" (UID: \"d78961c7-c9ff-4550-bf75-add0fcef53fe\") " pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" Nov 26 05:39:35 crc kubenswrapper[4871]: I1126 05:39:35.124883 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kf9rw\" (UniqueName: \"kubernetes.io/projected/d78961c7-c9ff-4550-bf75-add0fcef53fe-kube-api-access-kf9rw\") pod \"openstack-operator-controller-operator-5675dd9766-bp9px\" (UID: \"d78961c7-c9ff-4550-bf75-add0fcef53fe\") " pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" Nov 26 05:39:35 crc kubenswrapper[4871]: I1126 05:39:35.153309 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kf9rw\" (UniqueName: \"kubernetes.io/projected/d78961c7-c9ff-4550-bf75-add0fcef53fe-kube-api-access-kf9rw\") pod \"openstack-operator-controller-operator-5675dd9766-bp9px\" (UID: \"d78961c7-c9ff-4550-bf75-add0fcef53fe\") " pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" Nov 26 05:39:35 crc kubenswrapper[4871]: I1126 05:39:35.252262 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" Nov 26 05:39:35 crc kubenswrapper[4871]: I1126 05:39:35.770631 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px"] Nov 26 05:39:35 crc kubenswrapper[4871]: I1126 05:39:35.785778 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:36 crc kubenswrapper[4871]: I1126 05:39:36.740150 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" event={"ID":"d78961c7-c9ff-4550-bf75-add0fcef53fe","Type":"ContainerStarted","Data":"81d0b887e490ad57cac7c9bf0d74d20cd61d7474e88d7fd571900f7f747693a6"} Nov 26 05:39:37 crc kubenswrapper[4871]: I1126 05:39:37.147629 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-khc9n"] Nov 26 05:39:37 crc kubenswrapper[4871]: I1126 05:39:37.750256 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-khc9n" podUID="2ae96304-ccbd-4621-85e9-34789c3a1afe" containerName="registry-server" containerID="cri-o://6143a09edcabd5bf463b9eebe61d1808c93755c42bcaa6709cef8c97b3adcdd7" gracePeriod=2 Nov 26 05:39:38 crc kubenswrapper[4871]: I1126 05:39:38.760093 4871 generic.go:334] "Generic (PLEG): container finished" podID="2ae96304-ccbd-4621-85e9-34789c3a1afe" containerID="6143a09edcabd5bf463b9eebe61d1808c93755c42bcaa6709cef8c97b3adcdd7" exitCode=0 Nov 26 05:39:38 crc kubenswrapper[4871]: I1126 05:39:38.760135 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khc9n" event={"ID":"2ae96304-ccbd-4621-85e9-34789c3a1afe","Type":"ContainerDied","Data":"6143a09edcabd5bf463b9eebe61d1808c93755c42bcaa6709cef8c97b3adcdd7"} Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.322950 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.417213 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-utilities\") pod \"2ae96304-ccbd-4621-85e9-34789c3a1afe\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.417380 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4zpg\" (UniqueName: \"kubernetes.io/projected/2ae96304-ccbd-4621-85e9-34789c3a1afe-kube-api-access-l4zpg\") pod \"2ae96304-ccbd-4621-85e9-34789c3a1afe\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.417445 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-catalog-content\") pod \"2ae96304-ccbd-4621-85e9-34789c3a1afe\" (UID: \"2ae96304-ccbd-4621-85e9-34789c3a1afe\") " Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.418354 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-utilities" (OuterVolumeSpecName: "utilities") pod "2ae96304-ccbd-4621-85e9-34789c3a1afe" (UID: "2ae96304-ccbd-4621-85e9-34789c3a1afe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.424775 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ae96304-ccbd-4621-85e9-34789c3a1afe-kube-api-access-l4zpg" (OuterVolumeSpecName: "kube-api-access-l4zpg") pod "2ae96304-ccbd-4621-85e9-34789c3a1afe" (UID: "2ae96304-ccbd-4621-85e9-34789c3a1afe"). InnerVolumeSpecName "kube-api-access-l4zpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.465592 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2ae96304-ccbd-4621-85e9-34789c3a1afe" (UID: "2ae96304-ccbd-4621-85e9-34789c3a1afe"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.518936 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4zpg\" (UniqueName: \"kubernetes.io/projected/2ae96304-ccbd-4621-85e9-34789c3a1afe-kube-api-access-l4zpg\") on node \"crc\" DevicePath \"\"" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.518971 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.518982 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ae96304-ccbd-4621-85e9-34789c3a1afe-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.780742 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" event={"ID":"d78961c7-c9ff-4550-bf75-add0fcef53fe","Type":"ContainerStarted","Data":"3b2f9888499828a8b3269d3242740a4cd2e327e8c488ac4cdf2a16230027d05e"} Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.785365 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-khc9n" event={"ID":"2ae96304-ccbd-4621-85e9-34789c3a1afe","Type":"ContainerDied","Data":"eb0294ebc5c2df0d6417aa34169487100e12a5005b0d4adab7e592cb3cbfee0f"} Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.785451 4871 scope.go:117] "RemoveContainer" containerID="6143a09edcabd5bf463b9eebe61d1808c93755c42bcaa6709cef8c97b3adcdd7" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.785558 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-khc9n" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.816366 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" podStartSLOduration=2.531871243 podStartE2EDuration="6.816345185s" podCreationTimestamp="2025-11-26 05:39:34 +0000 UTC" firstStartedPulling="2025-11-26 05:39:35.777826135 +0000 UTC m=+833.960877721" lastFinishedPulling="2025-11-26 05:39:40.062300067 +0000 UTC m=+838.245351663" observedRunningTime="2025-11-26 05:39:40.811709268 +0000 UTC m=+838.994760864" watchObservedRunningTime="2025-11-26 05:39:40.816345185 +0000 UTC m=+838.999396771" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.819084 4871 scope.go:117] "RemoveContainer" containerID="c4ccb902c8c6ce1f192462127d7926e272cd23720ca392c048bf8e9ac595ee5b" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.830891 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-khc9n"] Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.839609 4871 scope.go:117] "RemoveContainer" containerID="a1e5f96b383eeab55f99088e0e56661478f9c0587fa58317a321f2c436d682dd" Nov 26 05:39:40 crc kubenswrapper[4871]: I1126 05:39:40.841259 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-khc9n"] Nov 26 05:39:41 crc kubenswrapper[4871]: I1126 05:39:41.795884 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" Nov 26 05:39:42 crc kubenswrapper[4871]: I1126 05:39:42.542107 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ae96304-ccbd-4621-85e9-34789c3a1afe" path="/var/lib/kubelet/pods/2ae96304-ccbd-4621-85e9-34789c3a1afe/volumes" Nov 26 05:39:45 crc kubenswrapper[4871]: I1126 05:39:45.255719 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" Nov 26 05:40:23 crc kubenswrapper[4871]: I1126 05:40:23.614692 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:40:23 crc kubenswrapper[4871]: I1126 05:40:23.616478 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.933447 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4"] Nov 26 05:40:24 crc kubenswrapper[4871]: E1126 05:40:24.935232 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ae96304-ccbd-4621-85e9-34789c3a1afe" containerName="extract-utilities" Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.935342 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ae96304-ccbd-4621-85e9-34789c3a1afe" containerName="extract-utilities" Nov 26 05:40:24 crc kubenswrapper[4871]: E1126 05:40:24.935442 4871 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ae96304-ccbd-4621-85e9-34789c3a1afe" containerName="registry-server" Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.935541 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ae96304-ccbd-4621-85e9-34789c3a1afe" containerName="registry-server" Nov 26 05:40:24 crc kubenswrapper[4871]: E1126 05:40:24.935643 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ae96304-ccbd-4621-85e9-34789c3a1afe" containerName="extract-content" Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.935711 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ae96304-ccbd-4621-85e9-34789c3a1afe" containerName="extract-content" Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.935918 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ae96304-ccbd-4621-85e9-34789c3a1afe" containerName="registry-server" Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.936846 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.940583 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk"] Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.941787 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.945089 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-pk9k6" Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.945276 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-6mpnz" Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.949851 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4"] Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.963948 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-tsz49"] Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.965090 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49"
Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.970138 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-f9cxs"
Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.970277 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk"]
Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.973854 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcrr4\" (UniqueName: \"kubernetes.io/projected/70168336-54b1-481f-b6a0-d565be07d353-kube-api-access-lcrr4\") pod \"designate-operator-controller-manager-955677c94-tsz49\" (UID: \"70168336-54b1-481f-b6a0-d565be07d353\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49"
Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.973907 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76c2l\" (UniqueName: \"kubernetes.io/projected/8c65e9f4-e3de-4bce-851a-f85c1036daa7-kube-api-access-76c2l\") pod \"barbican-operator-controller-manager-7b64f4fb85-bdpn4\" (UID: \"8c65e9f4-e3de-4bce-851a-f85c1036daa7\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4"
Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.973995 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bbxb\" (UniqueName: \"kubernetes.io/projected/ea13fc75-b3f0-48d3-9d86-5262df2957eb-kube-api-access-8bbxb\") pod \"cinder-operator-controller-manager-6b7f75547b-wmwwk\" (UID: \"ea13fc75-b3f0-48d3-9d86-5262df2957eb\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk"
Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.981139 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-tsz49"]
Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.995351 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j"]
Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.996325 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j"
Nov 26 05:40:24 crc kubenswrapper[4871]: I1126 05:40:24.998997 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-jsnwn"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.012463 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.026370 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.027404 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.032633 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-h8mks"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.041870 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.044448 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.051089 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.059079 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-lhlqv"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.074543 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.075499 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.077171 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcrr4\" (UniqueName: \"kubernetes.io/projected/70168336-54b1-481f-b6a0-d565be07d353-kube-api-access-lcrr4\") pod \"designate-operator-controller-manager-955677c94-tsz49\" (UID: \"70168336-54b1-481f-b6a0-d565be07d353\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.077217 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76c2l\" (UniqueName: \"kubernetes.io/projected/8c65e9f4-e3de-4bce-851a-f85c1036daa7-kube-api-access-76c2l\") pod \"barbican-operator-controller-manager-7b64f4fb85-bdpn4\" (UID: \"8c65e9f4-e3de-4bce-851a-f85c1036daa7\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.077261 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kncbj\" (UniqueName: \"kubernetes.io/projected/94ce6277-5176-415b-9f4d-847a73c93723-kube-api-access-kncbj\") pod \"glance-operator-controller-manager-589cbd6b5b-czv5j\" (UID: \"94ce6277-5176-415b-9f4d-847a73c93723\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.077302 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drftd\" (UniqueName: \"kubernetes.io/projected/9253bdc4-d16f-42eb-8704-0965e99dfe47-kube-api-access-drftd\") pod \"heat-operator-controller-manager-5b77f656f-5kslm\" (UID: \"9253bdc4-d16f-42eb-8704-0965e99dfe47\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.077323 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bbxb\" (UniqueName: \"kubernetes.io/projected/ea13fc75-b3f0-48d3-9d86-5262df2957eb-kube-api-access-8bbxb\") pod \"cinder-operator-controller-manager-6b7f75547b-wmwwk\" (UID: \"ea13fc75-b3f0-48d3-9d86-5262df2957eb\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.077357 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptm6j\" (UniqueName: \"kubernetes.io/projected/4659b831-32eb-4da2-97f3-f654a299605e-kube-api-access-ptm6j\") pod \"horizon-operator-controller-manager-5d494799bf-clm5v\" (UID: \"4659b831-32eb-4da2-97f3-f654a299605e\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.083583 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.088577 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.090210 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.090430 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-6chhg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.094817 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.095804 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.102584 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.112213 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-g4dzt"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.120715 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcrr4\" (UniqueName: \"kubernetes.io/projected/70168336-54b1-481f-b6a0-d565be07d353-kube-api-access-lcrr4\") pod \"designate-operator-controller-manager-955677c94-tsz49\" (UID: \"70168336-54b1-481f-b6a0-d565be07d353\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.121429 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.123759 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.125017 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.129688 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-4k6kn"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.129984 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-wgkrk"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.144110 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.151045 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bbxb\" (UniqueName: \"kubernetes.io/projected/ea13fc75-b3f0-48d3-9d86-5262df2957eb-kube-api-access-8bbxb\") pod \"cinder-operator-controller-manager-6b7f75547b-wmwwk\" (UID: \"ea13fc75-b3f0-48d3-9d86-5262df2957eb\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.154667 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76c2l\" (UniqueName: \"kubernetes.io/projected/8c65e9f4-e3de-4bce-851a-f85c1036daa7-kube-api-access-76c2l\") pod \"barbican-operator-controller-manager-7b64f4fb85-bdpn4\" (UID: \"8c65e9f4-e3de-4bce-851a-f85c1036daa7\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.168570 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.180796 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmdgt\" (UniqueName: \"kubernetes.io/projected/51410db5-d309-4625-8f36-02cf8f0ba419-kube-api-access-qmdgt\") pod \"ironic-operator-controller-manager-67cb4dc6d4-lzsqj\" (UID: \"51410db5-d309-4625-8f36-02cf8f0ba419\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.180851 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptm6j\" (UniqueName: \"kubernetes.io/projected/4659b831-32eb-4da2-97f3-f654a299605e-kube-api-access-ptm6j\") pod \"horizon-operator-controller-manager-5d494799bf-clm5v\" (UID: \"4659b831-32eb-4da2-97f3-f654a299605e\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.180871 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8nwt\" (UniqueName: \"kubernetes.io/projected/6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c-kube-api-access-r8nwt\") pod \"manila-operator-controller-manager-5d499bf58b-jvztg\" (UID: \"6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.180890 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert\") pod \"infra-operator-controller-manager-57548d458d-x5hqw\" (UID: \"06b4e3ae-765b-41c4-9334-4e33c2dc305f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.180915 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqxc5\" (UniqueName: \"kubernetes.io/projected/32cd59dd-1a82-4fce-81b1-ebc8f75f1e93-kube-api-access-wqxc5\") pod \"keystone-operator-controller-manager-7b4567c7cf-4gvxx\" (UID: \"32cd59dd-1a82-4fce-81b1-ebc8f75f1e93\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.180972 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27psm\" (UniqueName: \"kubernetes.io/projected/06b4e3ae-765b-41c4-9334-4e33c2dc305f-kube-api-access-27psm\") pod \"infra-operator-controller-manager-57548d458d-x5hqw\" (UID: \"06b4e3ae-765b-41c4-9334-4e33c2dc305f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.180991 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kncbj\" (UniqueName: \"kubernetes.io/projected/94ce6277-5176-415b-9f4d-847a73c93723-kube-api-access-kncbj\") pod \"glance-operator-controller-manager-589cbd6b5b-czv5j\" (UID: \"94ce6277-5176-415b-9f4d-847a73c93723\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.181023 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drftd\" (UniqueName: \"kubernetes.io/projected/9253bdc4-d16f-42eb-8704-0965e99dfe47-kube-api-access-drftd\") pod \"heat-operator-controller-manager-5b77f656f-5kslm\" (UID: \"9253bdc4-d16f-42eb-8704-0965e99dfe47\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.186926 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.189351 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.202014 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.202863 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.203704 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.211581 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.211948 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.212180 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.214086 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-l479r"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.214273 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-x6sgw"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.214492 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-2wncc"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.222256 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kncbj\" (UniqueName: \"kubernetes.io/projected/94ce6277-5176-415b-9f4d-847a73c93723-kube-api-access-kncbj\") pod \"glance-operator-controller-manager-589cbd6b5b-czv5j\" (UID: \"94ce6277-5176-415b-9f4d-847a73c93723\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.226409 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptm6j\" (UniqueName: \"kubernetes.io/projected/4659b831-32eb-4da2-97f3-f654a299605e-kube-api-access-ptm6j\") pod \"horizon-operator-controller-manager-5d494799bf-clm5v\" (UID: \"4659b831-32eb-4da2-97f3-f654a299605e\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.228148 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drftd\" (UniqueName: \"kubernetes.io/projected/9253bdc4-d16f-42eb-8704-0965e99dfe47-kube-api-access-drftd\") pod \"heat-operator-controller-manager-5b77f656f-5kslm\" (UID: \"9253bdc4-d16f-42eb-8704-0965e99dfe47\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.240960 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.258868 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.263510 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.264923 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.267207 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-r6fps"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.272393 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.282147 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27psm\" (UniqueName: \"kubernetes.io/projected/06b4e3ae-765b-41c4-9334-4e33c2dc305f-kube-api-access-27psm\") pod \"infra-operator-controller-manager-57548d458d-x5hqw\" (UID: \"06b4e3ae-765b-41c4-9334-4e33c2dc305f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.282290 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmdgt\" (UniqueName: \"kubernetes.io/projected/51410db5-d309-4625-8f36-02cf8f0ba419-kube-api-access-qmdgt\") pod \"ironic-operator-controller-manager-67cb4dc6d4-lzsqj\" (UID: \"51410db5-d309-4625-8f36-02cf8f0ba419\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.282374 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8nwt\" (UniqueName: \"kubernetes.io/projected/6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c-kube-api-access-r8nwt\") pod \"manila-operator-controller-manager-5d499bf58b-jvztg\" (UID: \"6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.282454 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert\") pod \"infra-operator-controller-manager-57548d458d-x5hqw\" (UID: \"06b4e3ae-765b-41c4-9334-4e33c2dc305f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.282548 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqxc5\" (UniqueName: \"kubernetes.io/projected/32cd59dd-1a82-4fce-81b1-ebc8f75f1e93-kube-api-access-wqxc5\") pod \"keystone-operator-controller-manager-7b4567c7cf-4gvxx\" (UID: \"32cd59dd-1a82-4fce-81b1-ebc8f75f1e93\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx"
Nov 26 05:40:25 crc kubenswrapper[4871]: E1126 05:40:25.283042 4871 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 26 05:40:25 crc kubenswrapper[4871]: E1126 05:40:25.283149 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert podName:06b4e3ae-765b-41c4-9334-4e33c2dc305f nodeName:}" failed. No retries permitted until 2025-11-26 05:40:25.783133962 +0000 UTC m=+883.966185548 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert") pod "infra-operator-controller-manager-57548d458d-x5hqw" (UID: "06b4e3ae-765b-41c4-9334-4e33c2dc305f") : secret "infra-operator-webhook-server-cert" not found
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.285196 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.286037 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.298626 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.299137 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.306562 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-fnxxc"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.307913 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqxc5\" (UniqueName: \"kubernetes.io/projected/32cd59dd-1a82-4fce-81b1-ebc8f75f1e93-kube-api-access-wqxc5\") pod \"keystone-operator-controller-manager-7b4567c7cf-4gvxx\" (UID: \"32cd59dd-1a82-4fce-81b1-ebc8f75f1e93\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.313117 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmdgt\" (UniqueName: \"kubernetes.io/projected/51410db5-d309-4625-8f36-02cf8f0ba419-kube-api-access-qmdgt\") pod \"ironic-operator-controller-manager-67cb4dc6d4-lzsqj\" (UID: \"51410db5-d309-4625-8f36-02cf8f0ba419\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.314028 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27psm\" (UniqueName: \"kubernetes.io/projected/06b4e3ae-765b-41c4-9334-4e33c2dc305f-kube-api-access-27psm\") pod \"infra-operator-controller-manager-57548d458d-x5hqw\" (UID: \"06b4e3ae-765b-41c4-9334-4e33c2dc305f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.314263 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.316575 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8nwt\" (UniqueName: \"kubernetes.io/projected/6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c-kube-api-access-r8nwt\") pod \"manila-operator-controller-manager-5d499bf58b-jvztg\" (UID: \"6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.318827 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.319156 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.344585 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.346603 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.347759 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.351968 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.352021 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-d6qg9"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.370800 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.375760 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.382665 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.383396 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lmbt\" (UniqueName: \"kubernetes.io/projected/19a75285-dcb7-4f34-b79c-613c96d555de-kube-api-access-9lmbt\") pod \"neutron-operator-controller-manager-6fdcddb789-6lpnj\" (UID: \"19a75285-dcb7-4f34-b79c-613c96d555de\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.383459 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztvgd\" (UniqueName: \"kubernetes.io/projected/2c7b5f25-e4ef-4abd-ba84-61b98f194ddd-kube-api-access-ztvgd\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-9xghq\" (UID: \"2c7b5f25-e4ef-4abd-ba84-61b98f194ddd\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.383587 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79cwl\" (UniqueName: \"kubernetes.io/projected/f68377a4-dee0-404b-988a-4f0673466e62-kube-api-access-79cwl\") pod \"nova-operator-controller-manager-79556f57fc-rlr55\" (UID: \"f68377a4-dee0-404b-988a-4f0673466e62\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.383644 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gv4sm\" (UniqueName: \"kubernetes.io/projected/33ba2b4e-6239-43c0-a694-6495b7ae2ba3-kube-api-access-gv4sm\") pod \"octavia-operator-controller-manager-64cdc6ff96-9lvtk\" (UID: \"33ba2b4e-6239-43c0-a694-6495b7ae2ba3\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.389346 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.395428 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.398866 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-8ngqz"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.407463 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.419545 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-skx5k"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.429330 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.436481 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-xhm7h"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.438747 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-skx5k"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.461087 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.463061 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.467619 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-8t7pb"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.472918 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.485726 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79cwl\" (UniqueName: \"kubernetes.io/projected/f68377a4-dee0-404b-988a-4f0673466e62-kube-api-access-79cwl\") pod \"nova-operator-controller-manager-79556f57fc-rlr55\" (UID: \"f68377a4-dee0-404b-988a-4f0673466e62\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.485774 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gv4sm\" (UniqueName: \"kubernetes.io/projected/33ba2b4e-6239-43c0-a694-6495b7ae2ba3-kube-api-access-gv4sm\") pod \"octavia-operator-controller-manager-64cdc6ff96-9lvtk\" (UID: \"33ba2b4e-6239-43c0-a694-6495b7ae2ba3\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.485794 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.485843 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lczgw\" (UniqueName: \"kubernetes.io/projected/6ccd73b2-dbfd-4cd6-845c-a61af4f20f96-kube-api-access-lczgw\") pod \"ovn-operator-controller-manager-56897c768d-shgb6\" (UID: \"6ccd73b2-dbfd-4cd6-845c-a61af4f20f96\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.485871 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9fqn\" (UniqueName: \"kubernetes.io/projected/6b5541da-9198-4f49-998b-1bfd982089d1-kube-api-access-c9fqn\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.485903 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lmbt\" (UniqueName: \"kubernetes.io/projected/19a75285-dcb7-4f34-b79c-613c96d555de-kube-api-access-9lmbt\") pod \"neutron-operator-controller-manager-6fdcddb789-6lpnj\" (UID: \"19a75285-dcb7-4f34-b79c-613c96d555de\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.485944 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztvgd\" (UniqueName: \"kubernetes.io/projected/2c7b5f25-e4ef-4abd-ba84-61b98f194ddd-kube-api-access-ztvgd\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-9xghq\" (UID: \"2c7b5f25-e4ef-4abd-ba84-61b98f194ddd\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.500368 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.501663 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.506226 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-xlclw"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.506784 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.511730 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79cwl\" (UniqueName: \"kubernetes.io/projected/f68377a4-dee0-404b-988a-4f0673466e62-kube-api-access-79cwl\") pod \"nova-operator-controller-manager-79556f57fc-rlr55\" (UID: \"f68377a4-dee0-404b-988a-4f0673466e62\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.513348 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gv4sm\" (UniqueName: \"kubernetes.io/projected/33ba2b4e-6239-43c0-a694-6495b7ae2ba3-kube-api-access-gv4sm\") pod \"octavia-operator-controller-manager-64cdc6ff96-9lvtk\" (UID: \"33ba2b4e-6239-43c0-a694-6495b7ae2ba3\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.525425 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.543712 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztvgd\" (UniqueName: \"kubernetes.io/projected/2c7b5f25-e4ef-4abd-ba84-61b98f194ddd-kube-api-access-ztvgd\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-9xghq\" (UID: \"2c7b5f25-e4ef-4abd-ba84-61b98f194ddd\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.545015 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lmbt\" (UniqueName: \"kubernetes.io/projected/19a75285-dcb7-4f34-b79c-613c96d555de-kube-api-access-9lmbt\") pod \"neutron-operator-controller-manager-6fdcddb789-6lpnj\" (UID: \"19a75285-dcb7-4f34-b79c-613c96d555de\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.586459 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.589139 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.589192 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7thm\" (UniqueName: \"kubernetes.io/projected/4b0778b1-b974-4ce6-bac4-59920ab67dd7-kube-api-access-h7thm\") pod \"swift-operator-controller-manager-d77b94747-skx5k\" (UID: \"4b0778b1-b974-4ce6-bac4-59920ab67dd7\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.589246 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lczgw\" (UniqueName: \"kubernetes.io/projected/6ccd73b2-dbfd-4cd6-845c-a61af4f20f96-kube-api-access-lczgw\") pod \"ovn-operator-controller-manager-56897c768d-shgb6\" (UID: \"6ccd73b2-dbfd-4cd6-845c-a61af4f20f96\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.589276 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9fqn\" (UniqueName: \"kubernetes.io/projected/6b5541da-9198-4f49-998b-1bfd982089d1-kube-api-access-c9fqn\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.589297 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8wj8\" (UniqueName: \"kubernetes.io/projected/974fe30e-68b5-42bb-9940-a2000ab315f8-kube-api-access-j8wj8\") pod \"telemetry-operator-controller-manager-76cc84c6bb-6kccm\" (UID: \"974fe30e-68b5-42bb-9940-a2000ab315f8\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.589319 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwh4l\" (UniqueName: \"kubernetes.io/projected/1cc75505-b927-488b-8a16-4fda9a1c2dca-kube-api-access-bwh4l\") pod \"placement-operator-controller-manager-57988cc5b5-dxbwn\" (UID: \"1cc75505-b927-488b-8a16-4fda9a1c2dca\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn"
Nov 26 05:40:25 crc kubenswrapper[4871]: E1126 05:40:25.589455 4871 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 05:40:25 crc kubenswrapper[4871]: E1126 05:40:25.589494 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert podName:6b5541da-9198-4f49-998b-1bfd982089d1 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:26.089480737 +0000 UTC m=+884.272532323 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" (UID: "6b5541da-9198-4f49-998b-1bfd982089d1") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.591036 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.597995 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.598094 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.601953 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-4knx6"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.626642 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9fqn\" (UniqueName: \"kubernetes.io/projected/6b5541da-9198-4f49-998b-1bfd982089d1-kube-api-access-c9fqn\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.628113 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.642782 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lczgw\" (UniqueName: \"kubernetes.io/projected/6ccd73b2-dbfd-4cd6-845c-a61af4f20f96-kube-api-access-lczgw\") pod \"ovn-operator-controller-manager-56897c768d-shgb6\" (UID: \"6ccd73b2-dbfd-4cd6-845c-a61af4f20f96\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.648301 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.659295 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.661063 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.661967 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.665335 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.665435 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-rhkvj"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.666284 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.674029 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.676818 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.700279 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zw768\" (UniqueName: \"kubernetes.io/projected/1b4fb0bb-1050-4bda-acf4-c3efafc79e4a-kube-api-access-zw768\") pod \"test-operator-controller-manager-5cd6c7f4c8-jj87z\" (UID: \"1b4fb0bb-1050-4bda-acf4-c3efafc79e4a\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.700394 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8wj8\" (UniqueName: \"kubernetes.io/projected/974fe30e-68b5-42bb-9940-a2000ab315f8-kube-api-access-j8wj8\") pod \"telemetry-operator-controller-manager-76cc84c6bb-6kccm\" (UID: \"974fe30e-68b5-42bb-9940-a2000ab315f8\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.700432 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwh4l\" (UniqueName: \"kubernetes.io/projected/1cc75505-b927-488b-8a16-4fda9a1c2dca-kube-api-access-bwh4l\") pod \"placement-operator-controller-manager-57988cc5b5-dxbwn\" (UID: \"1cc75505-b927-488b-8a16-4fda9a1c2dca\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.700546 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7thm\" (UniqueName: \"kubernetes.io/projected/4b0778b1-b974-4ce6-bac4-59920ab67dd7-kube-api-access-h7thm\") pod \"swift-operator-controller-manager-d77b94747-skx5k\" (UID: \"4b0778b1-b974-4ce6-bac4-59920ab67dd7\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.708386 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.720549 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwh4l\" (UniqueName: \"kubernetes.io/projected/1cc75505-b927-488b-8a16-4fda9a1c2dca-kube-api-access-bwh4l\") pod \"placement-operator-controller-manager-57988cc5b5-dxbwn\" (UID: \"1cc75505-b927-488b-8a16-4fda9a1c2dca\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.722446 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8wj8\" (UniqueName: \"kubernetes.io/projected/974fe30e-68b5-42bb-9940-a2000ab315f8-kube-api-access-j8wj8\") pod \"telemetry-operator-controller-manager-76cc84c6bb-6kccm\" (UID: \"974fe30e-68b5-42bb-9940-a2000ab315f8\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.722891 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7thm\" (UniqueName: \"kubernetes.io/projected/4b0778b1-b974-4ce6-bac4-59920ab67dd7-kube-api-access-h7thm\") pod \"swift-operator-controller-manager-d77b94747-skx5k\" (UID: \"4b0778b1-b974-4ce6-bac4-59920ab67dd7\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.727314 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.755300 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.756451 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.759689 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-69zl6"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.781964 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc"]
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.801989 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9rbl\" (UniqueName: \"kubernetes.io/projected/8d32351e-c0cc-4c2a-89b2-a79b61cf632e-kube-api-access-b9rbl\") pod \"watcher-operator-controller-manager-656dcb59d4-v95x7\" (UID: \"8d32351e-c0cc-4c2a-89b2-a79b61cf632e\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.802034 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.802118 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zw768\" (UniqueName: \"kubernetes.io/projected/1b4fb0bb-1050-4bda-acf4-c3efafc79e4a-kube-api-access-zw768\") pod \"test-operator-controller-manager-5cd6c7f4c8-jj87z\" (UID: \"1b4fb0bb-1050-4bda-acf4-c3efafc79e4a\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.802155 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42xfb\" (UniqueName: \"kubernetes.io/projected/6d7ff4ed-503b-4184-8633-47598150b7f0-kube-api-access-42xfb\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.802200 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.802251 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert\") pod \"infra-operator-controller-manager-57548d458d-x5hqw\" (UID: \"06b4e3ae-765b-41c4-9334-4e33c2dc305f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw"
Nov 26 05:40:25 crc kubenswrapper[4871]: E1126 05:40:25.802382 4871 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 26 05:40:25 crc kubenswrapper[4871]: E1126 05:40:25.802439 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert podName:06b4e3ae-765b-41c4-9334-4e33c2dc305f nodeName:}" failed. No retries permitted until 2025-11-26 05:40:26.802420638 +0000 UTC m=+884.985472224 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert") pod "infra-operator-controller-manager-57548d458d-x5hqw" (UID: "06b4e3ae-765b-41c4-9334-4e33c2dc305f") : secret "infra-operator-webhook-server-cert" not found
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.810477 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.824703 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zw768\" (UniqueName: \"kubernetes.io/projected/1b4fb0bb-1050-4bda-acf4-c3efafc79e4a-kube-api-access-zw768\") pod \"test-operator-controller-manager-5cd6c7f4c8-jj87z\" (UID: \"1b4fb0bb-1050-4bda-acf4-c3efafc79e4a\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.893641 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.898853 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.903374 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tn2xs\" (UniqueName: \"kubernetes.io/projected/0b2406e7-8b16-45e1-b726-645d22421af5-kube-api-access-tn2xs\") pod \"rabbitmq-cluster-operator-manager-668c99d594-6c6pc\" (UID: \"0b2406e7-8b16-45e1-b726-645d22421af5\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.903428 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9rbl\" (UniqueName: \"kubernetes.io/projected/8d32351e-c0cc-4c2a-89b2-a79b61cf632e-kube-api-access-b9rbl\") pod \"watcher-operator-controller-manager-656dcb59d4-v95x7\" (UID: \"8d32351e-c0cc-4c2a-89b2-a79b61cf632e\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.903457 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.903548 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42xfb\" (UniqueName: \"kubernetes.io/projected/6d7ff4ed-503b-4184-8633-47598150b7f0-kube-api-access-42xfb\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.903594 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"
Nov 26 05:40:25 crc kubenswrapper[4871]: E1126 05:40:25.903717 4871 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 26 05:40:25 crc kubenswrapper[4871]: E1126 05:40:25.903767 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs podName:6d7ff4ed-503b-4184-8633-47598150b7f0 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:26.403749693 +0000 UTC m=+884.586801279 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-2v8hx" (UID: "6d7ff4ed-503b-4184-8633-47598150b7f0") : secret "metrics-server-cert" not found
Nov 26 05:40:25 crc kubenswrapper[4871]: E1126 05:40:25.904024 4871 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 26 05:40:25 crc kubenswrapper[4871]: E1126 05:40:25.904054 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs podName:6d7ff4ed-503b-4184-8633-47598150b7f0 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:26.404045531 +0000 UTC m=+884.587097107 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-2v8hx" (UID: "6d7ff4ed-503b-4184-8633-47598150b7f0") : secret "webhook-server-cert" not found
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.924657 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9rbl\" (UniqueName: \"kubernetes.io/projected/8d32351e-c0cc-4c2a-89b2-a79b61cf632e-kube-api-access-b9rbl\") pod \"watcher-operator-controller-manager-656dcb59d4-v95x7\" (UID: \"8d32351e-c0cc-4c2a-89b2-a79b61cf632e\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7"
Nov 26 05:40:25 crc kubenswrapper[4871]: I1126 05:40:25.934326 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42xfb\" (UniqueName: \"kubernetes.io/projected/6d7ff4ed-503b-4184-8633-47598150b7f0-kube-api-access-42xfb\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.005263 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tn2xs\" (UniqueName: \"kubernetes.io/projected/0b2406e7-8b16-45e1-b726-645d22421af5-kube-api-access-tn2xs\") pod \"rabbitmq-cluster-operator-manager-668c99d594-6c6pc\" (UID: \"0b2406e7-8b16-45e1-b726-645d22421af5\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc"
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.033484 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tn2xs\" (UniqueName: \"kubernetes.io/projected/0b2406e7-8b16-45e1-b726-645d22421af5-kube-api-access-tn2xs\") pod \"rabbitmq-cluster-operator-manager-668c99d594-6c6pc\" (UID: \"0b2406e7-8b16-45e1-b726-645d22421af5\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc"
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.044976 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7"
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.106469 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"
Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.107879 4871 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.107929 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert podName:6b5541da-9198-4f49-998b-1bfd982089d1 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:27.107913182 +0000 UTC m=+885.290964768 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" (UID: "6b5541da-9198-4f49-998b-1bfd982089d1") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.156110 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc"
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.300410 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4"]
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.304151 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk"]
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.312695 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j"]
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.316233 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 05:40:26 crc kubenswrapper[4871]: W1126 05:40:26.318929 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod94ce6277_5176_415b_9f4d_847a73c93723.slice/crio-69456687b94411c388bc57b05cd635b6f763580d1462bb5a59cdaf6ec4e6d175 WatchSource:0}: Error finding container 69456687b94411c388bc57b05cd635b6f763580d1462bb5a59cdaf6ec4e6d175: Status 404 returned error can't find the container with id 69456687b94411c388bc57b05cd635b6f763580d1462bb5a59cdaf6ec4e6d175
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.388093 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-tsz49"]
Nov 26 05:40:26 crc kubenswrapper[4871]: W1126 05:40:26.391544 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70168336_54b1_481f_b6a0_d565be07d353.slice/crio-ef681ac1ebd9160a2e3151b6f3d358006399bf073f8269ac541859fbfbafa510 WatchSource:0}: Error finding container ef681ac1ebd9160a2e3151b6f3d358006399bf073f8269ac541859fbfbafa510: Status 404 returned error can't find the container with id ef681ac1ebd9160a2e3151b6f3d358006399bf073f8269ac541859fbfbafa510
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.392930 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v"]
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.406364 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx"]
Nov 26 05:40:26 crc kubenswrapper[4871]: W1126 05:40:26.408377 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4659b831_32eb_4da2_97f3_f654a299605e.slice/crio-ce8107b6f176f84f599ed5b71811c3ea60f701535530e91b1430082d50c8e848 WatchSource:0}: Error finding container ce8107b6f176f84f599ed5b71811c3ea60f701535530e91b1430082d50c8e848: Status 404 returned error can't find the container with id ce8107b6f176f84f599ed5b71811c3ea60f701535530e91b1430082d50c8e848
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.412982 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.413157 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"
Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.413444 4871 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.413666 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs podName:6d7ff4ed-503b-4184-8633-47598150b7f0 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:27.413483128 +0000 UTC m=+885.596534714 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-2v8hx" (UID: "6d7ff4ed-503b-4184-8633-47598150b7f0") : secret "webhook-server-cert" not found
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.414098 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm"]
Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.414134 4871 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.414188 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs podName:6d7ff4ed-503b-4184-8633-47598150b7f0 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:27.414170385 +0000 UTC m=+885.597221971 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-2v8hx" (UID: "6d7ff4ed-503b-4184-8633-47598150b7f0") : secret "metrics-server-cert" not found
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.739088 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6"]
Nov 26 05:40:26 crc kubenswrapper[4871]: W1126 05:40:26.742429 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ccd73b2_dbfd_4cd6_845c_a61af4f20f96.slice/crio-c038f261f54ee5aea765ef43fb8a3e850f0474db0c40d46988c2dd9b3b6f9575 WatchSource:0}: Error finding container c038f261f54ee5aea765ef43fb8a3e850f0474db0c40d46988c2dd9b3b6f9575: Status 404 returned error can't find the container with id c038f261f54ee5aea765ef43fb8a3e850f0474db0c40d46988c2dd9b3b6f9575
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.751742 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg"]
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.764475 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj"]
Nov 26 05:40:26 crc kubenswrapper[4871]: W1126 05:40:26.770098 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33ba2b4e_6239_43c0_a694_6495b7ae2ba3.slice/crio-32535d4b2dd71197a7d4f836531a7dc9b1f7bedd210cdc45431e32da4a3dfe09 WatchSource:0}: Error finding container 32535d4b2dd71197a7d4f836531a7dc9b1f7bedd210cdc45431e32da4a3dfe09: Status 404 returned error can't find the container with id 32535d4b2dd71197a7d4f836531a7dc9b1f7bedd210cdc45431e32da4a3dfe09
Nov 26 05:40:26 crc kubenswrapper[4871]: W1126 05:40:26.770329 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6dc2f30e_6f6a_4be9_b3b4_f2c7c636ca2c.slice/crio-f0555f9c567cc8232cf4eadec46fc714d8e49800eec9dfd4e296a7af8b086adb WatchSource:0}: Error finding container f0555f9c567cc8232cf4eadec46fc714d8e49800eec9dfd4e296a7af8b086adb: Status 404 returned error can't find the container with id f0555f9c567cc8232cf4eadec46fc714d8e49800eec9dfd4e296a7af8b086adb
Nov 26 05:40:26 crc kubenswrapper[4871]: W1126 05:40:26.771492 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c7b5f25_e4ef_4abd_ba84_61b98f194ddd.slice/crio-c43aabefbfe4405fa92df4a1e9763dd7819c6fe2afcc3b65ed14d3a509e883f4 WatchSource:0}: Error finding container c43aabefbfe4405fa92df4a1e9763dd7819c6fe2afcc3b65ed14d3a509e883f4: Status 404 returned error can't find the container with id c43aabefbfe4405fa92df4a1e9763dd7819c6fe2afcc3b65ed14d3a509e883f4
Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.773818 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq"]
Nov 26 05:40:26 crc kubenswrapper[4871]: W1126 05:40:26.776611 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf68377a4_dee0_404b_988a_4f0673466e62.slice/crio-bb9d922fa3c4cb6bfc6e33855ff192f1c280a837faccada34407a3942bffcd5a
WatchSource:0}: Error finding container bb9d922fa3c4cb6bfc6e33855ff192f1c280a837faccada34407a3942bffcd5a: Status 404 returned error can't find the container with id bb9d922fa3c4cb6bfc6e33855ff192f1c280a837faccada34407a3942bffcd5a Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.779197 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-79cwl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-rlr55_openstack-operators(f68377a4-dee0-404b-988a-4f0673466e62): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.783134 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-79cwl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-rlr55_openstack-operators(f68377a4-dee0-404b-988a-4f0673466e62): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.785756 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" podUID="f68377a4-dee0-404b-988a-4f0673466e62" Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.795722 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk"] Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.805716 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zw768,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-jj87z_openstack-operators(1b4fb0bb-1050-4bda-acf4-c3efafc79e4a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.809750 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zw768,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-jj87z_openstack-operators(1b4fb0bb-1050-4bda-acf4-c3efafc79e4a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.811127 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" podUID="1b4fb0bb-1050-4bda-acf4-c3efafc79e4a" Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.815630 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55"] Nov 26 05:40:26 crc kubenswrapper[4871]: W1126 05:40:26.818670 4871 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51410db5_d309_4625_8f36_02cf8f0ba419.slice/crio-aa5cd5bfd22f46953d88e792b3cb7a31bc62393c2d9bb93fe16bff4be2a8f55e WatchSource:0}: Error finding container aa5cd5bfd22f46953d88e792b3cb7a31bc62393c2d9bb93fe16bff4be2a8f55e: Status 404 returned error can't find the container with id aa5cd5bfd22f46953d88e792b3cb7a31bc62393c2d9bb93fe16bff4be2a8f55e Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.818850 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert\") pod \"infra-operator-controller-manager-57548d458d-x5hqw\" (UID: \"06b4e3ae-765b-41c4-9334-4e33c2dc305f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.818989 4871 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.819163 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert podName:06b4e3ae-765b-41c4-9334-4e33c2dc305f nodeName:}" failed. No retries permitted until 2025-11-26 05:40:28.819079049 +0000 UTC m=+887.002130635 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert") pod "infra-operator-controller-manager-57548d458d-x5hqw" (UID: "06b4e3ae-765b-41c4-9334-4e33c2dc305f") : secret "infra-operator-webhook-server-cert" not found Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.833626 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z"] Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.838932 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-j8wj8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-6kccm_openstack-operators(974fe30e-68b5-42bb-9940-a2000ab315f8): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.839172 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bwh4l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-dxbwn_openstack-operators(1cc75505-b927-488b-8a16-4fda9a1c2dca): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc 
kubenswrapper[4871]: E1126 05:40:26.839195 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qmdgt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-67cb4dc6d4-lzsqj_openstack-operators(51410db5-d309-4625-8f36-02cf8f0ba419): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.839940 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi 
BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tn2xs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-6c6pc_openstack-operators(0b2406e7-8b16-45e1-b726-645d22421af5): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.840609 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-j8wj8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-6kccm_openstack-operators(974fe30e-68b5-42bb-9940-a2000ab315f8): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.841035 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" podUID="0b2406e7-8b16-45e1-b726-645d22421af5" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.841194 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qmdgt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-67cb4dc6d4-lzsqj_openstack-operators(51410db5-d309-4625-8f36-02cf8f0ba419): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.842516 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" podUID="974fe30e-68b5-42bb-9940-a2000ab315f8" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.843201 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bwh4l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-dxbwn_openstack-operators(1cc75505-b927-488b-8a16-4fda9a1c2dca): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 
05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.843318 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" podUID="51410db5-d309-4625-8f36-02cf8f0ba419" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.844331 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" podUID="1cc75505-b927-488b-8a16-4fda9a1c2dca" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.846195 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b9rbl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-v95x7_openstack-operators(8d32351e-c0cc-4c2a-89b2-a79b61cf632e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.847605 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h7thm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-skx5k_openstack-operators(4b0778b1-b974-4ce6-bac4-59920ab67dd7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.848156 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b9rbl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-v95x7_openstack-operators(8d32351e-c0cc-4c2a-89b2-a79b61cf632e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.849286 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" podUID="8d32351e-c0cc-4c2a-89b2-a79b61cf632e" Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.849653 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj"] Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.851034 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h7thm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-skx5k_openstack-operators(4b0778b1-b974-4ce6-bac4-59920ab67dd7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:26 crc kubenswrapper[4871]: E1126 05:40:26.852248 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to 
\"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" podUID="4b0778b1-b974-4ce6-bac4-59920ab67dd7" Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.859444 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm"] Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.864982 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn"] Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.869224 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7"] Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.873424 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-skx5k"] Nov 26 05:40:26 crc kubenswrapper[4871]: I1126 05:40:26.878376 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc"] Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.123750 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.123903 4871 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.123946 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert podName:6b5541da-9198-4f49-998b-1bfd982089d1 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:29.123932937 +0000 UTC m=+887.306984523 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" (UID: "6b5541da-9198-4f49-998b-1bfd982089d1") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.201771 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" event={"ID":"1b4fb0bb-1050-4bda-acf4-c3efafc79e4a","Type":"ContainerStarted","Data":"34d45695a44b9f42103bd8e4321d668c9ab26b554e6fe3b80a38417db7f6c64d"} Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.209276 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" podUID="1b4fb0bb-1050-4bda-acf4-c3efafc79e4a" Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.209466 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" event={"ID":"974fe30e-68b5-42bb-9940-a2000ab315f8","Type":"ContainerStarted","Data":"3def5b8b868e1e0ec753c416672c7b9bbd94647a94d7b0e208396e7de033d7fd"} Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.217102 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" podUID="974fe30e-68b5-42bb-9940-a2000ab315f8" Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.217649 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" event={"ID":"9253bdc4-d16f-42eb-8704-0965e99dfe47","Type":"ContainerStarted","Data":"f3baf75a426d5a959ebfd74677770fb894731c2906b9afdcea5fa7a34604610a"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.221709 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" event={"ID":"19a75285-dcb7-4f34-b79c-613c96d555de","Type":"ContainerStarted","Data":"122e199ff79f2e0aa435e8aba9af318c945f1c6564fadef0cf26d0993071ae24"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.223853 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" event={"ID":"94ce6277-5176-415b-9f4d-847a73c93723","Type":"ContainerStarted","Data":"69456687b94411c388bc57b05cd635b6f763580d1462bb5a59cdaf6ec4e6d175"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.229076 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" 
event={"ID":"f68377a4-dee0-404b-988a-4f0673466e62","Type":"ContainerStarted","Data":"bb9d922fa3c4cb6bfc6e33855ff192f1c280a837faccada34407a3942bffcd5a"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.230154 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" event={"ID":"1cc75505-b927-488b-8a16-4fda9a1c2dca","Type":"ContainerStarted","Data":"7e2141cafd209a954bd369f808c0a0fe7b6fe1c788e2aef13d0bab2e909cbcfb"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.232884 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" event={"ID":"6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c","Type":"ContainerStarted","Data":"f0555f9c567cc8232cf4eadec46fc714d8e49800eec9dfd4e296a7af8b086adb"} Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.249274 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" podUID="1cc75505-b927-488b-8a16-4fda9a1c2dca" Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.249293 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" event={"ID":"8d32351e-c0cc-4c2a-89b2-a79b61cf632e","Type":"ContainerStarted","Data":"b4a8305fc8934039d3b08749d5a6bd6b37411ff5ee5d92ab1e68cd027cf8a7cf"} Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.250298 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" podUID="f68377a4-dee0-404b-988a-4f0673466e62" Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.252467 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" event={"ID":"0b2406e7-8b16-45e1-b726-645d22421af5","Type":"ContainerStarted","Data":"a6112bdbd3fd3059e5149648d3d022f0da95315fe91199e90102bca6c2dcfb7e"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.253993 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" event={"ID":"4b0778b1-b974-4ce6-bac4-59920ab67dd7","Type":"ContainerStarted","Data":"9ba46a000136de5f54daaeebbfc0836fca5590a82f16bda6ce941c5b40fc9865"} Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.259596 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" 
pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" podUID="0b2406e7-8b16-45e1-b726-645d22421af5" Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.260113 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" event={"ID":"33ba2b4e-6239-43c0-a694-6495b7ae2ba3","Type":"ContainerStarted","Data":"32535d4b2dd71197a7d4f836531a7dc9b1f7bedd210cdc45431e32da4a3dfe09"} Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.260245 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" podUID="8d32351e-c0cc-4c2a-89b2-a79b61cf632e" Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.262475 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" event={"ID":"ea13fc75-b3f0-48d3-9d86-5262df2957eb","Type":"ContainerStarted","Data":"57e50fec81f16bb1349f3f52bd44b85c037c0e5b5b484d593ed7247274732afd"} Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.264595 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" podUID="4b0778b1-b974-4ce6-bac4-59920ab67dd7" Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.264850 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" event={"ID":"2c7b5f25-e4ef-4abd-ba84-61b98f194ddd","Type":"ContainerStarted","Data":"c43aabefbfe4405fa92df4a1e9763dd7819c6fe2afcc3b65ed14d3a509e883f4"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.266919 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" event={"ID":"70168336-54b1-481f-b6a0-d565be07d353","Type":"ContainerStarted","Data":"ef681ac1ebd9160a2e3151b6f3d358006399bf073f8269ac541859fbfbafa510"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.277973 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" event={"ID":"51410db5-d309-4625-8f36-02cf8f0ba419","Type":"ContainerStarted","Data":"aa5cd5bfd22f46953d88e792b3cb7a31bc62393c2d9bb93fe16bff4be2a8f55e"} Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.279676 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" 
with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" podUID="51410db5-d309-4625-8f36-02cf8f0ba419" Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.280805 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" event={"ID":"6ccd73b2-dbfd-4cd6-845c-a61af4f20f96","Type":"ContainerStarted","Data":"c038f261f54ee5aea765ef43fb8a3e850f0474db0c40d46988c2dd9b3b6f9575"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.282186 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" event={"ID":"8c65e9f4-e3de-4bce-851a-f85c1036daa7","Type":"ContainerStarted","Data":"042ffb769dc188e90a0fd94af7d7ad7176f8a3a5ea8850324ec493ae207b2e6e"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.283807 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" event={"ID":"32cd59dd-1a82-4fce-81b1-ebc8f75f1e93","Type":"ContainerStarted","Data":"244ab91446e3b67307a9a4076f165086a67bd04f7ad10d65beddf361677e86c8"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.288198 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" event={"ID":"4659b831-32eb-4da2-97f3-f654a299605e","Type":"ContainerStarted","Data":"ce8107b6f176f84f599ed5b71811c3ea60f701535530e91b1430082d50c8e848"} Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.439178 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:27 crc kubenswrapper[4871]: I1126 05:40:27.439294 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.439371 4871 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.439441 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs podName:6d7ff4ed-503b-4184-8633-47598150b7f0 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:29.439424043 +0000 UTC m=+887.622475629 (durationBeforeRetry 2s). 
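The MountVolume.SetUp failures above are not image problems: the pod references TLS secrets ("metrics-server-cert", "webhook-server-cert") that do not exist in "openstack-operators" yet, typically because whatever issues them (cert-manager, or the operator bundle) has not created them at this point in the install; the kubelet just keeps retrying on a timer. A minimal sketch of creating such a secret by hand, assuming a cert/key pair already sits on disk (the kubectl equivalent is "kubectl -n openstack-operators create secret tls metrics-server-cert --cert=tls.crt --key=tls.key"):

package main

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	crt, err := os.ReadFile("tls.crt") // hypothetical paths; normally cert-manager issues these
	if err != nil {
		panic(err)
	}
	key, err := os.ReadFile("tls.key")
	if err != nil {
		panic(err)
	}

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "metrics-server-cert", // name and namespace copied from the log
			Namespace: "openstack-operators",
		},
		Type: corev1.SecretTypeTLS,
		Data: map[string][]byte{
			corev1.TLSCertKey:       crt, // "tls.crt"
			corev1.TLSPrivateKeyKey: key, // "tls.key"
		},
	}
	if _, err := client.CoreV1().Secrets(secret.Namespace).Create(
		context.TODO(), secret, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}

Once the secret exists, the next scheduled retry mounts it and the pod proceeds, which is exactly what happens at 05:40:41 below when "MountVolume.SetUp succeeded" appears for these volumes.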
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-2v8hx" (UID: "6d7ff4ed-503b-4184-8633-47598150b7f0") : secret "metrics-server-cert" not found Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.439489 4871 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 05:40:27 crc kubenswrapper[4871]: E1126 05:40:27.439557 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs podName:6d7ff4ed-503b-4184-8633-47598150b7f0 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:29.439544216 +0000 UTC m=+887.622595802 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-2v8hx" (UID: "6d7ff4ed-503b-4184-8633-47598150b7f0") : secret "webhook-server-cert" not found Nov 26 05:40:28 crc kubenswrapper[4871]: E1126 05:40:28.298167 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" podUID="0b2406e7-8b16-45e1-b726-645d22421af5" Nov 26 05:40:28 crc kubenswrapper[4871]: E1126 05:40:28.298339 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" podUID="4b0778b1-b974-4ce6-bac4-59920ab67dd7" Nov 26 05:40:28 crc kubenswrapper[4871]: E1126 05:40:28.298899 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" podUID="f68377a4-dee0-404b-988a-4f0673466e62" Nov 26 05:40:28 crc kubenswrapper[4871]: E1126 05:40:28.298994 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" podUID="51410db5-d309-4625-8f36-02cf8f0ba419" Nov 26 
05:40:28 crc kubenswrapper[4871]: E1126 05:40:28.299292 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" podUID="1b4fb0bb-1050-4bda-acf4-c3efafc79e4a" Nov 26 05:40:28 crc kubenswrapper[4871]: E1126 05:40:28.299345 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" podUID="8d32351e-c0cc-4c2a-89b2-a79b61cf632e" Nov 26 05:40:28 crc kubenswrapper[4871]: E1126 05:40:28.299443 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" podUID="974fe30e-68b5-42bb-9940-a2000ab315f8" Nov 26 05:40:28 crc kubenswrapper[4871]: E1126 05:40:28.305281 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" podUID="1cc75505-b927-488b-8a16-4fda9a1c2dca" Nov 26 05:40:28 crc kubenswrapper[4871]: I1126 05:40:28.858048 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert\") pod \"infra-operator-controller-manager-57548d458d-x5hqw\" (UID: \"06b4e3ae-765b-41c4-9334-4e33c2dc305f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:40:28 crc kubenswrapper[4871]: E1126 05:40:28.858258 4871 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 05:40:28 crc kubenswrapper[4871]: E1126 05:40:28.858379 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert podName:06b4e3ae-765b-41c4-9334-4e33c2dc305f nodeName:}" failed. 
No retries permitted until 2025-11-26 05:40:32.858352504 +0000 UTC m=+891.041404130 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert") pod "infra-operator-controller-manager-57548d458d-x5hqw" (UID: "06b4e3ae-765b-41c4-9334-4e33c2dc305f") : secret "infra-operator-webhook-server-cert" not found Nov 26 05:40:29 crc kubenswrapper[4871]: I1126 05:40:29.162613 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" Nov 26 05:40:29 crc kubenswrapper[4871]: E1126 05:40:29.162833 4871 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 05:40:29 crc kubenswrapper[4871]: E1126 05:40:29.162915 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert podName:6b5541da-9198-4f49-998b-1bfd982089d1 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:33.162893535 +0000 UTC m=+891.345945201 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" (UID: "6b5541da-9198-4f49-998b-1bfd982089d1") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 05:40:29 crc kubenswrapper[4871]: I1126 05:40:29.467267 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:29 crc kubenswrapper[4871]: I1126 05:40:29.467454 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:29 crc kubenswrapper[4871]: E1126 05:40:29.467492 4871 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 05:40:29 crc kubenswrapper[4871]: E1126 05:40:29.467590 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs podName:6d7ff4ed-503b-4184-8633-47598150b7f0 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:33.467563749 +0000 UTC m=+891.650615345 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-2v8hx" (UID: "6d7ff4ed-503b-4184-8633-47598150b7f0") : secret "metrics-server-cert" not found Nov 26 05:40:29 crc kubenswrapper[4871]: E1126 05:40:29.467663 4871 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 05:40:29 crc kubenswrapper[4871]: E1126 05:40:29.467714 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs podName:6d7ff4ed-503b-4184-8633-47598150b7f0 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:33.467697833 +0000 UTC m=+891.650749439 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-2v8hx" (UID: "6d7ff4ed-503b-4184-8633-47598150b7f0") : secret "webhook-server-cert" not found Nov 26 05:40:32 crc kubenswrapper[4871]: I1126 05:40:32.917708 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert\") pod \"infra-operator-controller-manager-57548d458d-x5hqw\" (UID: \"06b4e3ae-765b-41c4-9334-4e33c2dc305f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:40:32 crc kubenswrapper[4871]: E1126 05:40:32.918231 4871 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 05:40:32 crc kubenswrapper[4871]: E1126 05:40:32.919503 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert podName:06b4e3ae-765b-41c4-9334-4e33c2dc305f nodeName:}" failed. No retries permitted until 2025-11-26 05:40:40.919483824 +0000 UTC m=+899.102535410 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert") pod "infra-operator-controller-manager-57548d458d-x5hqw" (UID: "06b4e3ae-765b-41c4-9334-4e33c2dc305f") : secret "infra-operator-webhook-server-cert" not found Nov 26 05:40:33 crc kubenswrapper[4871]: I1126 05:40:33.223867 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" Nov 26 05:40:33 crc kubenswrapper[4871]: E1126 05:40:33.224063 4871 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 05:40:33 crc kubenswrapper[4871]: E1126 05:40:33.224109 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert podName:6b5541da-9198-4f49-998b-1bfd982089d1 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:41.224094697 +0000 UTC m=+899.407146283 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" (UID: "6b5541da-9198-4f49-998b-1bfd982089d1") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 05:40:33 crc kubenswrapper[4871]: I1126 05:40:33.528164 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:33 crc kubenswrapper[4871]: I1126 05:40:33.528266 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:33 crc kubenswrapper[4871]: E1126 05:40:33.528389 4871 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 05:40:33 crc kubenswrapper[4871]: E1126 05:40:33.528434 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs podName:6d7ff4ed-503b-4184-8633-47598150b7f0 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:41.528420773 +0000 UTC m=+899.711472359 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs") pod "openstack-operator-controller-manager-56868586f6-2v8hx" (UID: "6d7ff4ed-503b-4184-8633-47598150b7f0") : secret "metrics-server-cert" not found Nov 26 05:40:33 crc kubenswrapper[4871]: E1126 05:40:33.528871 4871 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 05:40:33 crc kubenswrapper[4871]: E1126 05:40:33.528964 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs podName:6d7ff4ed-503b-4184-8633-47598150b7f0 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:41.528942326 +0000 UTC m=+899.711993922 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs") pod "openstack-operator-controller-manager-56868586f6-2v8hx" (UID: "6d7ff4ed-503b-4184-8633-47598150b7f0") : secret "webhook-server-cert" not found Nov 26 05:40:37 crc kubenswrapper[4871]: E1126 05:40:37.643686 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lcrr4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-955677c94-tsz49_openstack-operators(70168336-54b1-481f-b6a0-d565be07d353): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:37 crc kubenswrapper[4871]: E1126 05:40:37.645713 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" podUID="70168336-54b1-481f-b6a0-d565be07d353" Nov 26 05:40:37 crc kubenswrapper[4871]: E1126 05:40:37.647293 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-drftd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5b77f656f-5kslm_openstack-operators(9253bdc4-d16f-42eb-8704-0965e99dfe47): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 05:40:37 crc kubenswrapper[4871]: E1126 05:40:37.649098 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" podUID="9253bdc4-d16f-42eb-8704-0965e99dfe47" Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.397140 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" event={"ID":"6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c","Type":"ContainerStarted","Data":"52ed078c3899a8e0ea1cd7891c5a68e3843463d688ccbfb103883eace4e0e360"} Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.404006 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" event={"ID":"70168336-54b1-481f-b6a0-d565be07d353","Type":"ContainerStarted","Data":"3088974cdf419f4d04057f0b9810ee6be73ae55ba8a84ff6b8e28058fb4afb0e"} Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.404825 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" Nov 26 05:40:38 crc kubenswrapper[4871]: E1126 05:40:38.407212 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" podUID="70168336-54b1-481f-b6a0-d565be07d353" Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.413085 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" event={"ID":"94ce6277-5176-415b-9f4d-847a73c93723","Type":"ContainerStarted","Data":"ac9ba9f70165f33e5d5356665cee826036e01cc5dba695debaad06db9b1520b4"} Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.424360 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" event={"ID":"6ccd73b2-dbfd-4cd6-845c-a61af4f20f96","Type":"ContainerStarted","Data":"204903e2609664bdf3ba8c0292a1dce6d4ca30f7128cf5430e02f6daeee562fb"} Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.430859 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" event={"ID":"9253bdc4-d16f-42eb-8704-0965e99dfe47","Type":"ContainerStarted","Data":"05f930210853fa853fe48dd63df7a0baec1e3eccccd99d2ef95036a9d87d5f33"} Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.431646 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" Nov 26 05:40:38 crc kubenswrapper[4871]: E1126 05:40:38.432615 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" podUID="9253bdc4-d16f-42eb-8704-0965e99dfe47" Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.434452 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" event={"ID":"33ba2b4e-6239-43c0-a694-6495b7ae2ba3","Type":"ContainerStarted","Data":"3e09aa8d4f0ef9000a5805f557fd681cb2f2582e5164380cadb9ccbd2bdb372b"} Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.436248 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" event={"ID":"8c65e9f4-e3de-4bce-851a-f85c1036daa7","Type":"ContainerStarted","Data":"fa5104c42e1d8c3f12c9876844b14e0c381b104561c91233e350583da19f347a"} Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.438580 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" event={"ID":"ea13fc75-b3f0-48d3-9d86-5262df2957eb","Type":"ContainerStarted","Data":"f7682c83668d74986e88f4af496b13e76c610148c3d3ceeb6b0e80d3fd97ad09"} Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.440448 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" event={"ID":"2c7b5f25-e4ef-4abd-ba84-61b98f194ddd","Type":"ContainerStarted","Data":"c5e2d8d67778c2905e59eb3be4d5f874f0a3fcf1530a721be5cf754554d7bd7e"} Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.441957 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" event={"ID":"32cd59dd-1a82-4fce-81b1-ebc8f75f1e93","Type":"ContainerStarted","Data":"10c02f89c12d91aa6c8a4af1b3e95d4857df0e1d88a4fff291795f84a734520f"} Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.444043 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" event={"ID":"4659b831-32eb-4da2-97f3-f654a299605e","Type":"ContainerStarted","Data":"cbadabae3fcd702f590e75f116e55d5e3cf61fe5ed0dbb95e07144b480bf4ff5"} Nov 26 05:40:38 crc kubenswrapper[4871]: I1126 05:40:38.445783 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" event={"ID":"19a75285-dcb7-4f34-b79c-613c96d555de","Type":"ContainerStarted","Data":"23666982ea9d1c13be5b1cf0918bbcf7f5e576b5ef3c678f0a4d7f13cc70fe8c"} Nov 26 05:40:39 crc kubenswrapper[4871]: E1126 05:40:39.459479 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" podUID="70168336-54b1-481f-b6a0-d565be07d353" Nov 26 05:40:39 crc kubenswrapper[4871]: E1126 05:40:39.459517 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" podUID="9253bdc4-d16f-42eb-8704-0965e99dfe47" Nov 26 05:40:40 crc kubenswrapper[4871]: I1126 05:40:40.946691 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert\") pod \"infra-operator-controller-manager-57548d458d-x5hqw\" (UID: \"06b4e3ae-765b-41c4-9334-4e33c2dc305f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:40:40 crc kubenswrapper[4871]: I1126 05:40:40.954600 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06b4e3ae-765b-41c4-9334-4e33c2dc305f-cert\") pod \"infra-operator-controller-manager-57548d458d-x5hqw\" (UID: \"06b4e3ae-765b-41c4-9334-4e33c2dc305f\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:40:41 crc kubenswrapper[4871]: I1126 05:40:41.114498 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:40:41 crc kubenswrapper[4871]: I1126 05:40:41.252645 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" Nov 26 05:40:41 crc kubenswrapper[4871]: E1126 05:40:41.252864 4871 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 05:40:41 crc kubenswrapper[4871]: E1126 05:40:41.252933 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert podName:6b5541da-9198-4f49-998b-1bfd982089d1 nodeName:}" failed. No retries permitted until 2025-11-26 05:40:57.252913302 +0000 UTC m=+915.435964908 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert") pod "openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" (UID: "6b5541da-9198-4f49-998b-1bfd982089d1") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 05:40:41 crc kubenswrapper[4871]: I1126 05:40:41.556612 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:41 crc kubenswrapper[4871]: I1126 05:40:41.556721 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:41 crc kubenswrapper[4871]: I1126 05:40:41.561981 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-metrics-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:41 crc kubenswrapper[4871]: I1126 05:40:41.564437 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6d7ff4ed-503b-4184-8633-47598150b7f0-webhook-certs\") pod \"openstack-operator-controller-manager-56868586f6-2v8hx\" (UID: \"6d7ff4ed-503b-4184-8633-47598150b7f0\") " pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:41 crc kubenswrapper[4871]: I1126 05:40:41.683337 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.303179 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.347945 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.517595 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" event={"ID":"6ccd73b2-dbfd-4cd6-845c-a61af4f20f96","Type":"ContainerStarted","Data":"34f5b160707ebbd59d4eba7c4070df539e47a9b38914b850e47f8345db0669de"} Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.521952 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" event={"ID":"8c65e9f4-e3de-4bce-851a-f85c1036daa7","Type":"ContainerStarted","Data":"69d8ce8befb094be265f5b0129e4cfc7e0a1e9c41a0a572a209ae32f3c54c46d"} Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.525181 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.525958 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.526920 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" event={"ID":"32cd59dd-1a82-4fce-81b1-ebc8f75f1e93","Type":"ContainerStarted","Data":"8dc3a551500874544f1463b64401408a5cda85625090614d5c4c1aa8d9aaa62d"} Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.528420 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" event={"ID":"19a75285-dcb7-4f34-b79c-613c96d555de","Type":"ContainerStarted","Data":"04ed821bea3376a8e37ca2bcb4c4efc0a3a2f3c44f654835d03f320c35966a6d"} Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.529842 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.530721 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.545104 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" podStartSLOduration=6.836952015 podStartE2EDuration="20.545083701s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.745785649 +0000 UTC m=+884.928837235" lastFinishedPulling="2025-11-26 05:40:40.453917335 +0000 UTC m=+898.636968921" observedRunningTime="2025-11-26 05:40:45.539062786 +0000 UTC m=+903.722114382" watchObservedRunningTime="2025-11-26 05:40:45.545083701 +0000 UTC m=+903.728135287" Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.590410 4871 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" podStartSLOduration=6.912630321 podStartE2EDuration="20.590380414s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.775696121 +0000 UTC m=+884.958747707" lastFinishedPulling="2025-11-26 05:40:40.453446214 +0000 UTC m=+898.636497800" observedRunningTime="2025-11-26 05:40:45.558488004 +0000 UTC m=+903.741539590" watchObservedRunningTime="2025-11-26 05:40:45.590380414 +0000 UTC m=+903.773432000" Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.596681 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" podStartSLOduration=7.420748379 podStartE2EDuration="21.596664976s" podCreationTimestamp="2025-11-26 05:40:24 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.317152563 +0000 UTC m=+884.500204149" lastFinishedPulling="2025-11-26 05:40:40.49306916 +0000 UTC m=+898.676120746" observedRunningTime="2025-11-26 05:40:45.574114812 +0000 UTC m=+903.757166408" watchObservedRunningTime="2025-11-26 05:40:45.596664976 +0000 UTC m=+903.779716562" Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.709260 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" Nov 26 05:40:45 crc kubenswrapper[4871]: I1126 05:40:45.711051 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" Nov 26 05:40:46 crc kubenswrapper[4871]: I1126 05:40:46.517987 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw"] Nov 26 05:40:46 crc kubenswrapper[4871]: I1126 05:40:46.555664 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" podStartSLOduration=7.503436993 podStartE2EDuration="21.555645374s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.418983461 +0000 UTC m=+884.602035047" lastFinishedPulling="2025-11-26 05:40:40.471191842 +0000 UTC m=+898.654243428" observedRunningTime="2025-11-26 05:40:46.555383577 +0000 UTC m=+904.738435163" watchObservedRunningTime="2025-11-26 05:40:46.555645374 +0000 UTC m=+904.738696960" Nov 26 05:40:46 crc kubenswrapper[4871]: W1126 05:40:46.786044 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06b4e3ae_765b_41c4_9334_4e33c2dc305f.slice/crio-176d4bfc5dad185a12c974d9cfa0ce42ab323ce4eab434a79bbaab2afffab55b WatchSource:0}: Error finding container 176d4bfc5dad185a12c974d9cfa0ce42ab323ce4eab434a79bbaab2afffab55b: Status 404 returned error can't find the container with id 176d4bfc5dad185a12c974d9cfa0ce42ab323ce4eab434a79bbaab2afffab55b Nov 26 05:40:47 crc kubenswrapper[4871]: I1126 05:40:47.198563 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"] Nov 26 05:40:47 crc kubenswrapper[4871]: W1126 05:40:47.449800 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d7ff4ed_503b_4184_8633_47598150b7f0.slice/crio-0aa728431a85f12f8adc4aec84d94bfebcfd2b7ed6f8674740eaa92b15bd6f38 WatchSource:0}: Error finding 
container 0aa728431a85f12f8adc4aec84d94bfebcfd2b7ed6f8674740eaa92b15bd6f38: Status 404 returned error can't find the container with id 0aa728431a85f12f8adc4aec84d94bfebcfd2b7ed6f8674740eaa92b15bd6f38 Nov 26 05:40:47 crc kubenswrapper[4871]: I1126 05:40:47.541937 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" event={"ID":"06b4e3ae-765b-41c4-9334-4e33c2dc305f","Type":"ContainerStarted","Data":"176d4bfc5dad185a12c974d9cfa0ce42ab323ce4eab434a79bbaab2afffab55b"} Nov 26 05:40:47 crc kubenswrapper[4871]: I1126 05:40:47.545616 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" event={"ID":"33ba2b4e-6239-43c0-a694-6495b7ae2ba3","Type":"ContainerStarted","Data":"07c28175bb1096e8ef2e2598af1c9d0602e2be1c786ab95571fda879f4599e18"} Nov 26 05:40:47 crc kubenswrapper[4871]: I1126 05:40:47.545693 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" Nov 26 05:40:47 crc kubenswrapper[4871]: I1126 05:40:47.548850 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" Nov 26 05:40:47 crc kubenswrapper[4871]: I1126 05:40:47.549501 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" event={"ID":"6d7ff4ed-503b-4184-8633-47598150b7f0","Type":"ContainerStarted","Data":"0aa728431a85f12f8adc4aec84d94bfebcfd2b7ed6f8674740eaa92b15bd6f38"} Nov 26 05:40:47 crc kubenswrapper[4871]: I1126 05:40:47.549704 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" Nov 26 05:40:47 crc kubenswrapper[4871]: I1126 05:40:47.551459 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" Nov 26 05:40:47 crc kubenswrapper[4871]: I1126 05:40:47.568925 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" podStartSLOduration=8.807722647 podStartE2EDuration="22.568904853s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.775258881 +0000 UTC m=+884.958310467" lastFinishedPulling="2025-11-26 05:40:40.536441077 +0000 UTC m=+898.719492673" observedRunningTime="2025-11-26 05:40:47.5638232 +0000 UTC m=+905.746874806" watchObservedRunningTime="2025-11-26 05:40:47.568904853 +0000 UTC m=+905.751956449" Nov 26 05:40:48 crc kubenswrapper[4871]: I1126 05:40:48.558674 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" event={"ID":"2c7b5f25-e4ef-4abd-ba84-61b98f194ddd","Type":"ContainerStarted","Data":"b54d87f4421009467e88f0bab9b717cb23218f4cd3e9dcdac2cde436142449a9"} Nov 26 05:40:48 crc kubenswrapper[4871]: I1126 05:40:48.559849 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" Nov 26 05:40:48 crc kubenswrapper[4871]: I1126 05:40:48.562920 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" Nov 26 05:40:48 crc 
kubenswrapper[4871]: I1126 05:40:48.576895 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" podStartSLOduration=4.778135476 podStartE2EDuration="23.576881704s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.775053566 +0000 UTC m=+884.958105162" lastFinishedPulling="2025-11-26 05:40:45.573799804 +0000 UTC m=+903.756851390" observedRunningTime="2025-11-26 05:40:48.574600589 +0000 UTC m=+906.757652175" watchObservedRunningTime="2025-11-26 05:40:48.576881704 +0000 UTC m=+906.759933290" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.613668 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" event={"ID":"4659b831-32eb-4da2-97f3-f654a299605e","Type":"ContainerStarted","Data":"e68c6b23259ca818a212ee7d2e9e4e5c2953e87481f2324dc972e756c2969821"} Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.614588 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.621138 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.633199 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" event={"ID":"9253bdc4-d16f-42eb-8704-0965e99dfe47","Type":"ContainerStarted","Data":"2c9dfc4b716287e672d6c814363a7e2b854a0362ee3b0b8315c5cda4b8c14872"} Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.649325 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" event={"ID":"94ce6277-5176-415b-9f4d-847a73c93723","Type":"ContainerStarted","Data":"8ce443f490443a6f5d2a61a6c70022eb5898d7092ebf4ffe318c6a973aa0a724"} Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.650069 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" podStartSLOduration=7.227859278 podStartE2EDuration="27.650042993s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.411226964 +0000 UTC m=+884.594278550" lastFinishedPulling="2025-11-26 05:40:46.833410679 +0000 UTC m=+905.016462265" observedRunningTime="2025-11-26 05:40:52.638170654 +0000 UTC m=+910.821222250" watchObservedRunningTime="2025-11-26 05:40:52.650042993 +0000 UTC m=+910.833094589" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.650338 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.653741 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.657427 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" event={"ID":"f68377a4-dee0-404b-988a-4f0673466e62","Type":"ContainerStarted","Data":"cd67ed0a097dfab3036663f2143ae697d23ba1e621e97bb593ebb6b539890fb8"} Nov 26 05:40:52 crc 
kubenswrapper[4871]: I1126 05:40:52.694847 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" event={"ID":"51410db5-d309-4625-8f36-02cf8f0ba419","Type":"ContainerStarted","Data":"d834d7cdfd9b843260c63f2c182c2499ca4b36944bbbbf70f194c781a4cf63db"} Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.700561 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" podStartSLOduration=17.995364158 podStartE2EDuration="28.700539017s" podCreationTimestamp="2025-11-26 05:40:24 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.417661209 +0000 UTC m=+884.600712795" lastFinishedPulling="2025-11-26 05:40:37.122836028 +0000 UTC m=+895.305887654" observedRunningTime="2025-11-26 05:40:52.695284558 +0000 UTC m=+910.878336154" watchObservedRunningTime="2025-11-26 05:40:52.700539017 +0000 UTC m=+910.883590603" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.730159 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" event={"ID":"1b4fb0bb-1050-4bda-acf4-c3efafc79e4a","Type":"ContainerStarted","Data":"d36a40705e9b7284171531fab3a7cac3fc3e28538ef15d4e62de5ed4e3b8d8fd"} Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.731416 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" podStartSLOduration=8.217715343 podStartE2EDuration="28.731390217s" podCreationTimestamp="2025-11-26 05:40:24 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.321055257 +0000 UTC m=+884.504106833" lastFinishedPulling="2025-11-26 05:40:46.834730121 +0000 UTC m=+905.017781707" observedRunningTime="2025-11-26 05:40:52.730435885 +0000 UTC m=+910.913487471" watchObservedRunningTime="2025-11-26 05:40:52.731390217 +0000 UTC m=+910.914441843" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.756383 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" event={"ID":"1cc75505-b927-488b-8a16-4fda9a1c2dca","Type":"ContainerStarted","Data":"19029519a721c5302346221f5c769fdd20e20118b672d0b32843135dcb6b5823"} Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.797809 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" event={"ID":"ea13fc75-b3f0-48d3-9d86-5262df2957eb","Type":"ContainerStarted","Data":"03f9a9062193d93d19214268cbec325304d7c2450d67f2720c6dce8e0f81792d"} Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.798295 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.801607 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.860505 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" event={"ID":"70168336-54b1-481f-b6a0-d565be07d353","Type":"ContainerStarted","Data":"65d30f5d6dc8b111b41decc21cf6978715a1b2675e754b3bc486de0e60597b26"} Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.866859 4871 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" podStartSLOduration=9.658489341 podStartE2EDuration="28.866846447s" podCreationTimestamp="2025-11-26 05:40:24 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.315970614 +0000 UTC m=+884.499022190" lastFinishedPulling="2025-11-26 05:40:45.52432771 +0000 UTC m=+903.707379296" observedRunningTime="2025-11-26 05:40:52.824812834 +0000 UTC m=+911.007864420" watchObservedRunningTime="2025-11-26 05:40:52.866846447 +0000 UTC m=+911.049898033" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.937895 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" event={"ID":"0b2406e7-8b16-45e1-b726-645d22421af5","Type":"ContainerStarted","Data":"7fe791832885cb9163ead7515fa99cf130c635e5f80e0ae2fe08996fff79a99d"} Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.967900 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" event={"ID":"974fe30e-68b5-42bb-9940-a2000ab315f8","Type":"ContainerStarted","Data":"729788fa0846702a61a6484701aa0fb0b413761972b78d47bc32a45d763469e9"} Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.968497 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.979507 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" podStartSLOduration=18.261151154 podStartE2EDuration="28.97948091s" podCreationTimestamp="2025-11-26 05:40:24 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.395499324 +0000 UTC m=+884.578550910" lastFinishedPulling="2025-11-26 05:40:37.11382907 +0000 UTC m=+895.296880666" observedRunningTime="2025-11-26 05:40:52.910079587 +0000 UTC m=+911.093131163" watchObservedRunningTime="2025-11-26 05:40:52.97948091 +0000 UTC m=+911.162532496" Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.996375 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" event={"ID":"6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c","Type":"ContainerStarted","Data":"18f70cd37c98cda2b201663539223b53d1b4e94ab77cffbc87cc3de81ab0f584"} Nov 26 05:40:52 crc kubenswrapper[4871]: I1126 05:40:52.997415 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" Nov 26 05:40:53 crc kubenswrapper[4871]: I1126 05:40:53.012245 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" podStartSLOduration=2.766656677 podStartE2EDuration="28.012226392s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.839782358 +0000 UTC m=+885.022833944" lastFinishedPulling="2025-11-26 05:40:52.085352073 +0000 UTC m=+910.268403659" observedRunningTime="2025-11-26 05:40:52.9728877 +0000 UTC m=+911.155939286" watchObservedRunningTime="2025-11-26 05:40:53.012226392 +0000 UTC m=+911.195277978" Nov 26 05:40:53 crc kubenswrapper[4871]: I1126 05:40:53.013960 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" podStartSLOduration=2.842669558 podStartE2EDuration="28.013954961s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.838729273 +0000 UTC m=+885.021780859" lastFinishedPulling="2025-11-26 05:40:52.010014676 +0000 UTC m=+910.193066262" observedRunningTime="2025-11-26 05:40:53.012328934 +0000 UTC m=+911.195380520" watchObservedRunningTime="2025-11-26 05:40:53.013954961 +0000 UTC m=+911.197006547" Nov 26 05:40:53 crc kubenswrapper[4871]: I1126 05:40:53.030256 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" Nov 26 05:40:53 crc kubenswrapper[4871]: I1126 05:40:53.110554 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" podStartSLOduration=6.77763134 podStartE2EDuration="28.110536449s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.776843009 +0000 UTC m=+884.959894595" lastFinishedPulling="2025-11-26 05:40:48.109748088 +0000 UTC m=+906.292799704" observedRunningTime="2025-11-26 05:40:53.079901725 +0000 UTC m=+911.262953311" watchObservedRunningTime="2025-11-26 05:40:53.110536449 +0000 UTC m=+911.293588035" Nov 26 05:40:53 crc kubenswrapper[4871]: I1126 05:40:53.615377 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:40:53 crc kubenswrapper[4871]: I1126 05:40:53.615449 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.004419 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" event={"ID":"51410db5-d309-4625-8f36-02cf8f0ba419","Type":"ContainerStarted","Data":"4a91e2e1e91c34b0838d1174bcd304e0d4b30a90babb320a8b8a93fd3588e5f9"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.004795 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.006164 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" event={"ID":"974fe30e-68b5-42bb-9940-a2000ab315f8","Type":"ContainerStarted","Data":"28b51e0b49b78ac765880a94d25f39a4d37bbcd1459a58587abdd579f034699f"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.007797 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" event={"ID":"f68377a4-dee0-404b-988a-4f0673466e62","Type":"ContainerStarted","Data":"183bbf523a7450a3a251b32102caf54b8407fe804b209e492697e6e8a77013a2"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.007968 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.009198 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" event={"ID":"6d7ff4ed-503b-4184-8633-47598150b7f0","Type":"ContainerStarted","Data":"fabd64274ddc70e6258b0580259ca3c9ad99d6c7e81edaba0f0e641ca6684029"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.009661 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.011407 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" event={"ID":"8d32351e-c0cc-4c2a-89b2-a79b61cf632e","Type":"ContainerStarted","Data":"ea0cbddcc35812768c10a376b6a223f0c3cc57a731e4b7cb3f53d9a0f6385674"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.011430 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" event={"ID":"8d32351e-c0cc-4c2a-89b2-a79b61cf632e","Type":"ContainerStarted","Data":"49b6c7b8dd8536e2c3295be101ee08b218c4f0fb7f20daa6a4e49204e8d5816d"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.011556 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.012893 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" event={"ID":"4b0778b1-b974-4ce6-bac4-59920ab67dd7","Type":"ContainerStarted","Data":"5cbd0e232735b7021b920963d8d53850b17d7af579e0382ac54ae0ac54646789"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.012913 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" event={"ID":"4b0778b1-b974-4ce6-bac4-59920ab67dd7","Type":"ContainerStarted","Data":"d9772fd8b68909e3ca0767e5b83edbc3a379833a1b710577a69ebe4c62e4b56f"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.013119 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.014544 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" event={"ID":"06b4e3ae-765b-41c4-9334-4e33c2dc305f","Type":"ContainerStarted","Data":"e931e26b899b76e22ea16c2a732fdf8dc1c60198ef56d493be4ae26aca65d318"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.014574 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" event={"ID":"06b4e3ae-765b-41c4-9334-4e33c2dc305f","Type":"ContainerStarted","Data":"e77d7460d38ef447f45dff46723b8bac6ac72e9643ab7b192218ee88cae7763e"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.014956 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.016396 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" 
event={"ID":"1b4fb0bb-1050-4bda-acf4-c3efafc79e4a","Type":"ContainerStarted","Data":"18a135648208bc34d6ee8310b12ec92064ac8307d50bb0109afb66bf255d0228"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.016890 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.019427 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" event={"ID":"1cc75505-b927-488b-8a16-4fda9a1c2dca","Type":"ContainerStarted","Data":"c6af7130fefffafb2187175c857ee6f33601f65b90ebf256b92c5edbdfc9b945"} Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.025091 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" podStartSLOduration=3.777943357 podStartE2EDuration="29.025073308s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.839102352 +0000 UTC m=+885.022153938" lastFinishedPulling="2025-11-26 05:40:52.086232303 +0000 UTC m=+910.269283889" observedRunningTime="2025-11-26 05:40:54.019370109 +0000 UTC m=+912.202421695" watchObservedRunningTime="2025-11-26 05:40:54.025073308 +0000 UTC m=+912.208124904" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.034585 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" podStartSLOduration=23.689268984 podStartE2EDuration="29.034562613s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:46.793749261 +0000 UTC m=+904.976800847" lastFinishedPulling="2025-11-26 05:40:52.13904289 +0000 UTC m=+910.322094476" observedRunningTime="2025-11-26 05:40:54.032045196 +0000 UTC m=+912.215096802" watchObservedRunningTime="2025-11-26 05:40:54.034562613 +0000 UTC m=+912.217614199" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.048261 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" podStartSLOduration=3.844027274 podStartE2EDuration="29.048246663s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.805605963 +0000 UTC m=+884.988657549" lastFinishedPulling="2025-11-26 05:40:52.009825342 +0000 UTC m=+910.192876938" observedRunningTime="2025-11-26 05:40:54.046620916 +0000 UTC m=+912.229672502" watchObservedRunningTime="2025-11-26 05:40:54.048246663 +0000 UTC m=+912.231298249" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.073018 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" podStartSLOduration=7.740229768 podStartE2EDuration="29.073001974s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.779055152 +0000 UTC m=+884.962106738" lastFinishedPulling="2025-11-26 05:40:48.111827318 +0000 UTC m=+906.294878944" observedRunningTime="2025-11-26 05:40:54.068578544 +0000 UTC m=+912.251630150" watchObservedRunningTime="2025-11-26 05:40:54.073001974 +0000 UTC m=+912.256053560" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.116222 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" 
podStartSLOduration=3.841733472 podStartE2EDuration="29.116207943s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.84607242 +0000 UTC m=+885.029124006" lastFinishedPulling="2025-11-26 05:40:52.120546901 +0000 UTC m=+910.303598477" observedRunningTime="2025-11-26 05:40:54.11517889 +0000 UTC m=+912.298230476" watchObservedRunningTime="2025-11-26 05:40:54.116207943 +0000 UTC m=+912.299259529" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.120803 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" podStartSLOduration=29.120795147 podStartE2EDuration="29.120795147s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:40:54.102462482 +0000 UTC m=+912.285514068" watchObservedRunningTime="2025-11-26 05:40:54.120795147 +0000 UTC m=+912.303846733" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.132034 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" podStartSLOduration=3.898550643 podStartE2EDuration="29.132012752s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.847423163 +0000 UTC m=+885.030474749" lastFinishedPulling="2025-11-26 05:40:52.080885272 +0000 UTC m=+910.263936858" observedRunningTime="2025-11-26 05:40:54.130138839 +0000 UTC m=+912.313190425" watchObservedRunningTime="2025-11-26 05:40:54.132012752 +0000 UTC m=+912.315064328" Nov 26 05:40:54 crc kubenswrapper[4871]: I1126 05:40:54.143017 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" podStartSLOduration=3.861048789 podStartE2EDuration="29.143001451s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:26.838663291 +0000 UTC m=+885.021714867" lastFinishedPulling="2025-11-26 05:40:52.120615943 +0000 UTC m=+910.303667529" observedRunningTime="2025-11-26 05:40:54.142375647 +0000 UTC m=+912.325427233" watchObservedRunningTime="2025-11-26 05:40:54.143001451 +0000 UTC m=+912.326053037" Nov 26 05:40:55 crc kubenswrapper[4871]: I1126 05:40:55.028179 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" Nov 26 05:40:57 crc kubenswrapper[4871]: I1126 05:40:57.294831 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" Nov 26 05:40:57 crc kubenswrapper[4871]: I1126 05:40:57.303810 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b5541da-9198-4f49-998b-1bfd982089d1-cert\") pod \"openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg\" (UID: \"6b5541da-9198-4f49-998b-1bfd982089d1\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" Nov 26 05:40:57 crc kubenswrapper[4871]: I1126 05:40:57.504464 4871 reflector.go:368] Caches 
populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-d6qg9" Nov 26 05:40:57 crc kubenswrapper[4871]: I1126 05:40:57.513265 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" Nov 26 05:40:57 crc kubenswrapper[4871]: I1126 05:40:57.937948 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"] Nov 26 05:40:58 crc kubenswrapper[4871]: I1126 05:40:58.062222 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" event={"ID":"6b5541da-9198-4f49-998b-1bfd982089d1","Type":"ContainerStarted","Data":"075a5ced493ea1f570402bfe39d270f44627afc830bbdfcfe2fdd296435450dc"} Nov 26 05:41:00 crc kubenswrapper[4871]: I1126 05:41:00.080178 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" event={"ID":"6b5541da-9198-4f49-998b-1bfd982089d1","Type":"ContainerStarted","Data":"a230d44a0f8af03925d0268b71691a6ee2741f0bffda5a602fee97be5da0d0d5"} Nov 26 05:41:00 crc kubenswrapper[4871]: I1126 05:41:00.081778 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" event={"ID":"6b5541da-9198-4f49-998b-1bfd982089d1","Type":"ContainerStarted","Data":"485b7809110277357899f2ad77f5ee26c11ef81bd52aefaebf5d24da22de60c7"} Nov 26 05:41:00 crc kubenswrapper[4871]: I1126 05:41:00.081875 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" Nov 26 05:41:00 crc kubenswrapper[4871]: I1126 05:41:00.117106 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" podStartSLOduration=33.613656574 podStartE2EDuration="35.117086521s" podCreationTimestamp="2025-11-26 05:40:25 +0000 UTC" firstStartedPulling="2025-11-26 05:40:57.94819316 +0000 UTC m=+916.131244756" lastFinishedPulling="2025-11-26 05:40:59.451623087 +0000 UTC m=+917.634674703" observedRunningTime="2025-11-26 05:41:00.110329297 +0000 UTC m=+918.293380903" watchObservedRunningTime="2025-11-26 05:41:00.117086521 +0000 UTC m=+918.300138107" Nov 26 05:41:01 crc kubenswrapper[4871]: I1126 05:41:01.124081 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:41:01 crc kubenswrapper[4871]: I1126 05:41:01.689317 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:41:05 crc kubenswrapper[4871]: I1126 05:41:05.529708 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" Nov 26 05:41:05 crc kubenswrapper[4871]: I1126 05:41:05.643928 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" Nov 26 05:41:05 crc kubenswrapper[4871]: I1126 05:41:05.742792 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" Nov 26 05:41:05 crc kubenswrapper[4871]: I1126 05:41:05.813692 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" Nov 26 05:41:05 crc kubenswrapper[4871]: I1126 05:41:05.896929 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" Nov 26 05:41:05 crc kubenswrapper[4871]: I1126 05:41:05.901182 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" Nov 26 05:41:06 crc kubenswrapper[4871]: I1126 05:41:06.048134 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" Nov 26 05:41:07 crc kubenswrapper[4871]: I1126 05:41:07.522424 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" Nov 26 05:41:23 crc kubenswrapper[4871]: I1126 05:41:23.614658 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:41:23 crc kubenswrapper[4871]: I1126 05:41:23.615390 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:41:23 crc kubenswrapper[4871]: I1126 05:41:23.615459 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:41:23 crc kubenswrapper[4871]: I1126 05:41:23.616268 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"352b2b280740af55cbe8f36dbe220adf905af3370f34cf811c417077b6fe54f3"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 05:41:23 crc kubenswrapper[4871]: I1126 05:41:23.616366 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://352b2b280740af55cbe8f36dbe220adf905af3370f34cf811c417077b6fe54f3" gracePeriod=600 Nov 26 05:41:24 crc kubenswrapper[4871]: I1126 05:41:24.301785 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="352b2b280740af55cbe8f36dbe220adf905af3370f34cf811c417077b6fe54f3" exitCode=0 Nov 26 05:41:24 crc kubenswrapper[4871]: I1126 05:41:24.301833 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"352b2b280740af55cbe8f36dbe220adf905af3370f34cf811c417077b6fe54f3"} Nov 26 
05:41:24 crc kubenswrapper[4871]: I1126 05:41:24.302216 4871 scope.go:117] "RemoveContainer" containerID="f9b3c6b7dc711fbab7cfc1df233a4b33f288cd38725d31ae281cb8abef183fd7" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.168457 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8468885bfc-5xcmc"] Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.175722 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.177220 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8468885bfc-5xcmc"] Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.182620 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.182826 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.182936 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.183034 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-876rq" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.237285 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-config\") pod \"dnsmasq-dns-8468885bfc-5xcmc\" (UID: \"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e\") " pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.237373 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcd92\" (UniqueName: \"kubernetes.io/projected/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-kube-api-access-mcd92\") pod \"dnsmasq-dns-8468885bfc-5xcmc\" (UID: \"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e\") " pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.240416 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-545d49fd5c-jn2t4"] Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.241490 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.244606 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.279934 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-545d49fd5c-jn2t4"] Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.330177 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"5865561ff4962bde5a4a448acaaef84f57651a9dc7c55ecf0253e295a67c98b1"} Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.339067 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-config\") pod \"dnsmasq-dns-545d49fd5c-jn2t4\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") " pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.339154 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcd92\" (UniqueName: \"kubernetes.io/projected/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-kube-api-access-mcd92\") pod \"dnsmasq-dns-8468885bfc-5xcmc\" (UID: \"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e\") " pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.339190 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-dns-svc\") pod \"dnsmasq-dns-545d49fd5c-jn2t4\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") " pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.339245 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsxlc\" (UniqueName: \"kubernetes.io/projected/31249d26-77aa-43b4-b6a2-6152cf3bf986-kube-api-access-wsxlc\") pod \"dnsmasq-dns-545d49fd5c-jn2t4\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") " pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.339418 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-config\") pod \"dnsmasq-dns-8468885bfc-5xcmc\" (UID: \"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e\") " pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.340337 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-config\") pod \"dnsmasq-dns-8468885bfc-5xcmc\" (UID: \"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e\") " pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.373920 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcd92\" (UniqueName: \"kubernetes.io/projected/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-kube-api-access-mcd92\") pod \"dnsmasq-dns-8468885bfc-5xcmc\" (UID: \"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e\") " pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.440948 4871 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-config\") pod \"dnsmasq-dns-545d49fd5c-jn2t4\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") " pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.441654 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-config\") pod \"dnsmasq-dns-545d49fd5c-jn2t4\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") " pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.441724 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-dns-svc\") pod \"dnsmasq-dns-545d49fd5c-jn2t4\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") " pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.442222 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-dns-svc\") pod \"dnsmasq-dns-545d49fd5c-jn2t4\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") " pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.442288 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsxlc\" (UniqueName: \"kubernetes.io/projected/31249d26-77aa-43b4-b6a2-6152cf3bf986-kube-api-access-wsxlc\") pod \"dnsmasq-dns-545d49fd5c-jn2t4\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") " pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.461277 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsxlc\" (UniqueName: \"kubernetes.io/projected/31249d26-77aa-43b4-b6a2-6152cf3bf986-kube-api-access-wsxlc\") pod \"dnsmasq-dns-545d49fd5c-jn2t4\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") " pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.521006 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.561809 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.837980 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-545d49fd5c-jn2t4"] Nov 26 05:41:26 crc kubenswrapper[4871]: I1126 05:41:26.987638 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8468885bfc-5xcmc"] Nov 26 05:41:26 crc kubenswrapper[4871]: W1126 05:41:26.989708 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0dcea5db_05f7_4743_a5d6_0444dc2d2b6e.slice/crio-777fc58572517fab082d875a97c729ccab8bdb83104577608628bb2ec3cb6869 WatchSource:0}: Error finding container 777fc58572517fab082d875a97c729ccab8bdb83104577608628bb2ec3cb6869: Status 404 returned error can't find the container with id 777fc58572517fab082d875a97c729ccab8bdb83104577608628bb2ec3cb6869 Nov 26 05:41:27 crc kubenswrapper[4871]: I1126 05:41:27.340039 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" event={"ID":"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e","Type":"ContainerStarted","Data":"777fc58572517fab082d875a97c729ccab8bdb83104577608628bb2ec3cb6869"} Nov 26 05:41:27 crc kubenswrapper[4871]: I1126 05:41:27.341635 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" event={"ID":"31249d26-77aa-43b4-b6a2-6152cf3bf986","Type":"ContainerStarted","Data":"cebcb9530d82b63601213f5031a4ddbefdbb56f4382dccee313c5564fee7cc2a"} Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.296067 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-545d49fd5c-jn2t4"] Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.323335 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b9b4959cc-mjnvz"] Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.325069 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.347188 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b9b4959cc-mjnvz"] Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.420058 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkxxz\" (UniqueName: \"kubernetes.io/projected/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-kube-api-access-hkxxz\") pod \"dnsmasq-dns-b9b4959cc-mjnvz\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.420111 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-config\") pod \"dnsmasq-dns-b9b4959cc-mjnvz\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.420165 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-dns-svc\") pod \"dnsmasq-dns-b9b4959cc-mjnvz\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.520910 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkxxz\" (UniqueName: \"kubernetes.io/projected/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-kube-api-access-hkxxz\") pod \"dnsmasq-dns-b9b4959cc-mjnvz\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.521346 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-config\") pod \"dnsmasq-dns-b9b4959cc-mjnvz\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.521424 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-dns-svc\") pod \"dnsmasq-dns-b9b4959cc-mjnvz\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.522470 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-dns-svc\") pod \"dnsmasq-dns-b9b4959cc-mjnvz\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.522915 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-config\") pod \"dnsmasq-dns-b9b4959cc-mjnvz\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.544678 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkxxz\" (UniqueName: 
\"kubernetes.io/projected/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-kube-api-access-hkxxz\") pod \"dnsmasq-dns-b9b4959cc-mjnvz\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.601078 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8468885bfc-5xcmc"] Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.624636 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86b8f4ff9-gzswq"] Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.628399 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.650040 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.657788 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86b8f4ff9-gzswq"] Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.726950 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdqjx\" (UniqueName: \"kubernetes.io/projected/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-kube-api-access-mdqjx\") pod \"dnsmasq-dns-86b8f4ff9-gzswq\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.727005 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-config\") pod \"dnsmasq-dns-86b8f4ff9-gzswq\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.727088 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-dns-svc\") pod \"dnsmasq-dns-86b8f4ff9-gzswq\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.828791 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdqjx\" (UniqueName: \"kubernetes.io/projected/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-kube-api-access-mdqjx\") pod \"dnsmasq-dns-86b8f4ff9-gzswq\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.828852 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-config\") pod \"dnsmasq-dns-86b8f4ff9-gzswq\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.828940 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-dns-svc\") pod \"dnsmasq-dns-86b8f4ff9-gzswq\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.829792 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-dns-svc\") pod \"dnsmasq-dns-86b8f4ff9-gzswq\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.830455 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-config\") pod \"dnsmasq-dns-86b8f4ff9-gzswq\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.874413 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdqjx\" (UniqueName: \"kubernetes.io/projected/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-kube-api-access-mdqjx\") pod \"dnsmasq-dns-86b8f4ff9-gzswq\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.906226 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86b8f4ff9-gzswq"] Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.906820 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.923724 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5449989c59-wmxsr"] Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.930289 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:41:30 crc kubenswrapper[4871]: I1126 05:41:30.945038 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5449989c59-wmxsr"] Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.031310 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-dns-svc\") pod \"dnsmasq-dns-5449989c59-wmxsr\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.031376 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-config\") pod \"dnsmasq-dns-5449989c59-wmxsr\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.031438 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwnhb\" (UniqueName: \"kubernetes.io/projected/e2c68410-1c80-45dc-b1be-ed9307460cd8-kube-api-access-mwnhb\") pod \"dnsmasq-dns-5449989c59-wmxsr\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.133080 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-dns-svc\") pod \"dnsmasq-dns-5449989c59-wmxsr\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.133137 4871 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-config\") pod \"dnsmasq-dns-5449989c59-wmxsr\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.133182 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwnhb\" (UniqueName: \"kubernetes.io/projected/e2c68410-1c80-45dc-b1be-ed9307460cd8-kube-api-access-mwnhb\") pod \"dnsmasq-dns-5449989c59-wmxsr\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.136391 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-dns-svc\") pod \"dnsmasq-dns-5449989c59-wmxsr\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.136575 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-config\") pod \"dnsmasq-dns-5449989c59-wmxsr\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.153673 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwnhb\" (UniqueName: \"kubernetes.io/projected/e2c68410-1c80-45dc-b1be-ed9307460cd8-kube-api-access-mwnhb\") pod \"dnsmasq-dns-5449989c59-wmxsr\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.261778 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b9b4959cc-mjnvz"] Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.263347 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:41:31 crc kubenswrapper[4871]: W1126 05:41:31.277671 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb9b3c97_7742_4e8d_b3ec_a17c8ae67d3e.slice/crio-b3629a032cb700ee846eda08bf5bc5e343963dc4819b359d87124f896168ebf7 WatchSource:0}: Error finding container b3629a032cb700ee846eda08bf5bc5e343963dc4819b359d87124f896168ebf7: Status 404 returned error can't find the container with id b3629a032cb700ee846eda08bf5bc5e343963dc4819b359d87124f896168ebf7 Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.387783 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" event={"ID":"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e","Type":"ContainerStarted","Data":"b3629a032cb700ee846eda08bf5bc5e343963dc4819b359d87124f896168ebf7"} Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.401636 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86b8f4ff9-gzswq"] Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.469209 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.470612 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.476184 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.476594 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.476765 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.476879 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.477502 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.477639 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mqgn6" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.477747 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.496143 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.538758 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.538813 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4ba97673-d74c-47df-acae-f2dcc1ed10df-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.538870 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.538897 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.538919 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-config-data\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.538956 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-tls\") pod 
\"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.540994 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.541071 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.541121 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4ba97673-d74c-47df-acae-f2dcc1ed10df-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.544135 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cx7rd\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-kube-api-access-cx7rd\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.544210 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.645593 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.645658 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4ba97673-d74c-47df-acae-f2dcc1ed10df-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.645701 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.645724 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 
05:41:31.645747 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-config-data\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.645769 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.645819 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.645844 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.645873 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4ba97673-d74c-47df-acae-f2dcc1ed10df-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.645906 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cx7rd\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-kube-api-access-cx7rd\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.645935 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.645993 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.646856 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.646908 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-plugins-conf\") pod 
\"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.647775 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-config-data\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.648001 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.648112 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.651069 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4ba97673-d74c-47df-acae-f2dcc1ed10df-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.651411 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.657466 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4ba97673-d74c-47df-acae-f2dcc1ed10df-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.657949 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.674844 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.677462 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cx7rd\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-kube-api-access-cx7rd\") pod \"rabbitmq-server-0\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.707261 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5449989c59-wmxsr"] Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.749707 
4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.750937 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.753219 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.754361 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.755048 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.756059 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.759672 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.760025 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-sw6fb" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.761648 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.771114 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.796964 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.848728 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.848812 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfbqb\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-kube-api-access-kfbqb\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.848843 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3f9dfba-a3a9-45ef-a96c-91c654671b97-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.848873 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3f9dfba-a3a9-45ef-a96c-91c654671b97-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.849012 4871 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.849040 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.849065 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.849087 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.849162 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.849201 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.849358 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.951092 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.951564 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfbqb\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-kube-api-access-kfbqb\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.951587 4871 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3f9dfba-a3a9-45ef-a96c-91c654671b97-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.951607 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3f9dfba-a3a9-45ef-a96c-91c654671b97-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.951647 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.951667 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.951680 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.951694 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.951722 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.951777 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.951820 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:31 crc kubenswrapper[4871]: I1126 05:41:31.952198 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:31.953013 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:31.956232 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.007250 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.007405 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.008918 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.011413 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3f9dfba-a3a9-45ef-a96c-91c654671b97-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.012591 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3f9dfba-a3a9-45ef-a96c-91c654671b97-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.015266 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfbqb\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-kube-api-access-kfbqb\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.016925 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.017058 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.026339 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.027509 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.031224 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-default-user" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.031387 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-erlang-cookie" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.031553 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-notifications-svc" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.031672 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-server-conf" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.031807 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-plugins-conf" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.032718 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-server-dockercfg-ngj58" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.032938 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-config-data" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.053150 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7df95f1b-7a5b-445e-bb56-b17695a0bde9-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.053238 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmlpm\" (UniqueName: \"kubernetes.io/projected/7df95f1b-7a5b-445e-bb56-b17695a0bde9-kube-api-access-jmlpm\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.053270 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.053313 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.053332 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7df95f1b-7a5b-445e-bb56-b17695a0bde9-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.053348 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7df95f1b-7a5b-445e-bb56-b17695a0bde9-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.053425 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7df95f1b-7a5b-445e-bb56-b17695a0bde9-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.053494 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.053540 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.053578 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.053606 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7df95f1b-7a5b-445e-bb56-b17695a0bde9-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.055966 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.056335 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 
crc kubenswrapper[4871]: I1126 05:41:32.080896 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.155697 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7df95f1b-7a5b-445e-bb56-b17695a0bde9-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.155769 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7df95f1b-7a5b-445e-bb56-b17695a0bde9-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.155787 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmlpm\" (UniqueName: \"kubernetes.io/projected/7df95f1b-7a5b-445e-bb56-b17695a0bde9-kube-api-access-jmlpm\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.155804 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.155821 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7df95f1b-7a5b-445e-bb56-b17695a0bde9-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.155835 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.155851 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7df95f1b-7a5b-445e-bb56-b17695a0bde9-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.155870 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7df95f1b-7a5b-445e-bb56-b17695a0bde9-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.155907 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.155933 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.155957 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.157153 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7df95f1b-7a5b-445e-bb56-b17695a0bde9-config-data\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.157203 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7df95f1b-7a5b-445e-bb56-b17695a0bde9-server-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.157251 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7df95f1b-7a5b-445e-bb56-b17695a0bde9-plugins-conf\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.157516 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.157916 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-erlang-cookie\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.158711 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-plugins\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.161628 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/7df95f1b-7a5b-445e-bb56-b17695a0bde9-pod-info\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.161805 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-tls\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.162194 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7df95f1b-7a5b-445e-bb56-b17695a0bde9-erlang-cookie-secret\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.164351 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7df95f1b-7a5b-445e-bb56-b17695a0bde9-rabbitmq-confd\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.170153 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmlpm\" (UniqueName: \"kubernetes.io/projected/7df95f1b-7a5b-445e-bb56-b17695a0bde9-kube-api-access-jmlpm\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.177576 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-notifications-server-0\" (UID: \"7df95f1b-7a5b-445e-bb56-b17695a0bde9\") " pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.350307 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.358288 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 05:41:32 crc kubenswrapper[4871]: W1126 05:41:32.378418 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ba97673_d74c_47df_acae_f2dcc1ed10df.slice/crio-200f44a9a5f1171868c1d4dd4f350d989277beea7c8d27453120140cb3d6593c WatchSource:0}: Error finding container 200f44a9a5f1171868c1d4dd4f350d989277beea7c8d27453120140cb3d6593c: Status 404 returned error can't find the container with id 200f44a9a5f1171868c1d4dd4f350d989277beea7c8d27453120140cb3d6593c Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.400487 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" event={"ID":"c81cfa5f-8875-4d7d-ad9e-5c22439a7820","Type":"ContainerStarted","Data":"a32738a85d4a6d6e74998c7bd0a0e4a46d32baac03ee645f8de034cdc3309707"} Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.401726 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5449989c59-wmxsr" event={"ID":"e2c68410-1c80-45dc-b1be-ed9307460cd8","Type":"ContainerStarted","Data":"cfc3a1057712b74a63c9b5e981b76251a1fe57756aaa32dfa6acaf7b00a06dc0"} Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.403175 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4ba97673-d74c-47df-acae-f2dcc1ed10df","Type":"ContainerStarted","Data":"200f44a9a5f1171868c1d4dd4f350d989277beea7c8d27453120140cb3d6593c"} Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.591411 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 05:41:32 crc kubenswrapper[4871]: I1126 05:41:32.837272 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-notifications-server-0"] Nov 26 05:41:32 crc kubenswrapper[4871]: W1126 05:41:32.857600 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7df95f1b_7a5b_445e_bb56_b17695a0bde9.slice/crio-c6234a79cddd9c37bdd1b99004bb019496f3e95d958b368f1c3c3c91949e1027 WatchSource:0}: Error finding container c6234a79cddd9c37bdd1b99004bb019496f3e95d958b368f1c3c3c91949e1027: Status 404 returned error can't find the container with id c6234a79cddd9c37bdd1b99004bb019496f3e95d958b368f1c3c3c91949e1027 Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.414006 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b3f9dfba-a3a9-45ef-a96c-91c654671b97","Type":"ContainerStarted","Data":"519b591f0063f72f1e7c0b2b265e922312e11ab5d5648d0c0a804fc2157c27ae"} Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.415104 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"7df95f1b-7a5b-445e-bb56-b17695a0bde9","Type":"ContainerStarted","Data":"c6234a79cddd9c37bdd1b99004bb019496f3e95d958b368f1c3c3c91949e1027"} Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.846598 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.851353 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.853801 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.853870 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.859317 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-ll4ht" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.859622 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.863735 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.868176 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.989331 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fef4681d-3f18-4ed5-b251-92f53274dacd-kolla-config\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.989388 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fef4681d-3f18-4ed5-b251-92f53274dacd-operator-scripts\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.989573 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/fef4681d-3f18-4ed5-b251-92f53274dacd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.989626 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.989659 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/fef4681d-3f18-4ed5-b251-92f53274dacd-config-data-default\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.989678 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fef4681d-3f18-4ed5-b251-92f53274dacd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.989953 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/fef4681d-3f18-4ed5-b251-92f53274dacd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:33 crc kubenswrapper[4871]: I1126 05:41:33.990041 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5wr7\" (UniqueName: \"kubernetes.io/projected/fef4681d-3f18-4ed5-b251-92f53274dacd-kube-api-access-l5wr7\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.091495 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/fef4681d-3f18-4ed5-b251-92f53274dacd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.091544 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.091567 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/fef4681d-3f18-4ed5-b251-92f53274dacd-config-data-default\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.091586 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fef4681d-3f18-4ed5-b251-92f53274dacd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.091607 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/fef4681d-3f18-4ed5-b251-92f53274dacd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.091624 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5wr7\" (UniqueName: \"kubernetes.io/projected/fef4681d-3f18-4ed5-b251-92f53274dacd-kube-api-access-l5wr7\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.091664 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fef4681d-3f18-4ed5-b251-92f53274dacd-kolla-config\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.091700 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fef4681d-3f18-4ed5-b251-92f53274dacd-operator-scripts\") pod \"openstack-galera-0\" (UID: 
\"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.091924 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.092026 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/fef4681d-3f18-4ed5-b251-92f53274dacd-config-data-generated\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.092726 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fef4681d-3f18-4ed5-b251-92f53274dacd-kolla-config\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.093412 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fef4681d-3f18-4ed5-b251-92f53274dacd-operator-scripts\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.095922 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/fef4681d-3f18-4ed5-b251-92f53274dacd-config-data-default\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.098783 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fef4681d-3f18-4ed5-b251-92f53274dacd-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.112373 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5wr7\" (UniqueName: \"kubernetes.io/projected/fef4681d-3f18-4ed5-b251-92f53274dacd-kube-api-access-l5wr7\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.115631 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/fef4681d-3f18-4ed5-b251-92f53274dacd-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.119060 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"fef4681d-3f18-4ed5-b251-92f53274dacd\") " pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.179520 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 05:41:34 crc kubenswrapper[4871]: I1126 05:41:34.681029 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 05:41:34 crc kubenswrapper[4871]: W1126 05:41:34.688683 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfef4681d_3f18_4ed5_b251_92f53274dacd.slice/crio-4d544cdeb56eec3bef067f13582de0828e8d852d0832dbe9a0ba82e216ee71c7 WatchSource:0}: Error finding container 4d544cdeb56eec3bef067f13582de0828e8d852d0832dbe9a0ba82e216ee71c7: Status 404 returned error can't find the container with id 4d544cdeb56eec3bef067f13582de0828e8d852d0832dbe9a0ba82e216ee71c7 Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.231059 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.232583 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.244997 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.245549 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-d2mkt" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.246190 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.247181 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.247428 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.415218 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.415752 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.415785 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.415807 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5m69x\" (UniqueName: \"kubernetes.io/projected/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-kube-api-access-5m69x\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " 
pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.415838 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.415883 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.415944 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.415992 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.474156 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"fef4681d-3f18-4ed5-b251-92f53274dacd","Type":"ContainerStarted","Data":"4d544cdeb56eec3bef067f13582de0828e8d852d0832dbe9a0ba82e216ee71c7"} Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.517701 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.517782 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.517817 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.517875 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.517896 4871 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.517917 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.517947 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5m69x\" (UniqueName: \"kubernetes.io/projected/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-kube-api-access-5m69x\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.517969 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.518675 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.519040 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.519656 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.519703 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.523085 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.544265 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.544554 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.550549 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5m69x\" (UniqueName: \"kubernetes.io/projected/1a6ce456-795f-4bf1-bab9-f5de7cfd7abe-kube-api-access-5m69x\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.581327 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe\") " pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.649157 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.656883 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.656976 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.659872 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-7br6l" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.660472 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.660723 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.824537 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2757b1a6-7b8f-4008-8a08-96985496ec1a-kolla-config\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.824656 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2757b1a6-7b8f-4008-8a08-96985496ec1a-combined-ca-bundle\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.824734 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2757b1a6-7b8f-4008-8a08-96985496ec1a-config-data\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.824762 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2757b1a6-7b8f-4008-8a08-96985496ec1a-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.824837 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bh4rq\" (UniqueName: \"kubernetes.io/projected/2757b1a6-7b8f-4008-8a08-96985496ec1a-kube-api-access-bh4rq\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.866022 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.925864 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2757b1a6-7b8f-4008-8a08-96985496ec1a-kolla-config\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.925925 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2757b1a6-7b8f-4008-8a08-96985496ec1a-combined-ca-bundle\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.925968 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2757b1a6-7b8f-4008-8a08-96985496ec1a-config-data\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.925997 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2757b1a6-7b8f-4008-8a08-96985496ec1a-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.926039 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bh4rq\" (UniqueName: \"kubernetes.io/projected/2757b1a6-7b8f-4008-8a08-96985496ec1a-kube-api-access-bh4rq\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.926939 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2757b1a6-7b8f-4008-8a08-96985496ec1a-kolla-config\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.926948 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2757b1a6-7b8f-4008-8a08-96985496ec1a-config-data\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.929846 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2757b1a6-7b8f-4008-8a08-96985496ec1a-combined-ca-bundle\") pod \"memcached-0\" (UID: 
\"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.929981 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2757b1a6-7b8f-4008-8a08-96985496ec1a-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:35 crc kubenswrapper[4871]: I1126 05:41:35.941039 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bh4rq\" (UniqueName: \"kubernetes.io/projected/2757b1a6-7b8f-4008-8a08-96985496ec1a-kube-api-access-bh4rq\") pod \"memcached-0\" (UID: \"2757b1a6-7b8f-4008-8a08-96985496ec1a\") " pod="openstack/memcached-0" Nov 26 05:41:36 crc kubenswrapper[4871]: I1126 05:41:36.013004 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 26 05:41:36 crc kubenswrapper[4871]: I1126 05:41:36.346845 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 05:41:36 crc kubenswrapper[4871]: W1126 05:41:36.356280 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a6ce456_795f_4bf1_bab9_f5de7cfd7abe.slice/crio-725472d7964350dcc2af0d2680134d980d1ee927c5954de1d11ca7a7d5fd3a39 WatchSource:0}: Error finding container 725472d7964350dcc2af0d2680134d980d1ee927c5954de1d11ca7a7d5fd3a39: Status 404 returned error can't find the container with id 725472d7964350dcc2af0d2680134d980d1ee927c5954de1d11ca7a7d5fd3a39 Nov 26 05:41:36 crc kubenswrapper[4871]: I1126 05:41:36.450794 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 05:41:36 crc kubenswrapper[4871]: W1126 05:41:36.459744 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2757b1a6_7b8f_4008_8a08_96985496ec1a.slice/crio-f54a880c6b3c1b9615f9b320668029badcd8fa12ab5643d54cc1134ee72966e6 WatchSource:0}: Error finding container f54a880c6b3c1b9615f9b320668029badcd8fa12ab5643d54cc1134ee72966e6: Status 404 returned error can't find the container with id f54a880c6b3c1b9615f9b320668029badcd8fa12ab5643d54cc1134ee72966e6 Nov 26 05:41:36 crc kubenswrapper[4871]: I1126 05:41:36.483041 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"2757b1a6-7b8f-4008-8a08-96985496ec1a","Type":"ContainerStarted","Data":"f54a880c6b3c1b9615f9b320668029badcd8fa12ab5643d54cc1134ee72966e6"} Nov 26 05:41:36 crc kubenswrapper[4871]: I1126 05:41:36.484599 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe","Type":"ContainerStarted","Data":"725472d7964350dcc2af0d2680134d980d1ee927c5954de1d11ca7a7d5fd3a39"} Nov 26 05:41:37 crc kubenswrapper[4871]: I1126 05:41:37.268412 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 05:41:37 crc kubenswrapper[4871]: I1126 05:41:37.269994 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 05:41:37 crc kubenswrapper[4871]: I1126 05:41:37.271858 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-jtj7s" Nov 26 05:41:37 crc kubenswrapper[4871]: I1126 05:41:37.281649 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 05:41:37 crc kubenswrapper[4871]: I1126 05:41:37.366156 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsm7b\" (UniqueName: \"kubernetes.io/projected/44a0ad7f-f13b-492a-914f-359b86e8be85-kube-api-access-xsm7b\") pod \"kube-state-metrics-0\" (UID: \"44a0ad7f-f13b-492a-914f-359b86e8be85\") " pod="openstack/kube-state-metrics-0" Nov 26 05:41:37 crc kubenswrapper[4871]: I1126 05:41:37.467357 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsm7b\" (UniqueName: \"kubernetes.io/projected/44a0ad7f-f13b-492a-914f-359b86e8be85-kube-api-access-xsm7b\") pod \"kube-state-metrics-0\" (UID: \"44a0ad7f-f13b-492a-914f-359b86e8be85\") " pod="openstack/kube-state-metrics-0" Nov 26 05:41:37 crc kubenswrapper[4871]: I1126 05:41:37.497997 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsm7b\" (UniqueName: \"kubernetes.io/projected/44a0ad7f-f13b-492a-914f-359b86e8be85-kube-api-access-xsm7b\") pod \"kube-state-metrics-0\" (UID: \"44a0ad7f-f13b-492a-914f-359b86e8be85\") " pod="openstack/kube-state-metrics-0" Nov 26 05:41:37 crc kubenswrapper[4871]: I1126 05:41:37.622566 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.235949 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 05:41:38 crc kubenswrapper[4871]: W1126 05:41:38.247773 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44a0ad7f_f13b_492a_914f_359b86e8be85.slice/crio-b4a5d08f43c58b12ff8e0099f470efda868583ddd145d293cc43a60e2cec4d22 WatchSource:0}: Error finding container b4a5d08f43c58b12ff8e0099f470efda868583ddd145d293cc43a60e2cec4d22: Status 404 returned error can't find the container with id b4a5d08f43c58b12ff8e0099f470efda868583ddd145d293cc43a60e2cec4d22 Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.543887 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"44a0ad7f-f13b-492a-914f-359b86e8be85","Type":"ContainerStarted","Data":"b4a5d08f43c58b12ff8e0099f470efda868583ddd145d293cc43a60e2cec4d22"} Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.591757 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.593750 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.599786 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.601872 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.602032 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.602370 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-qb6hl" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.602377 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.614032 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.627949 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.697364 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.697412 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/25606939-d595-4bfc-aead-c40883fdae31-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.697448 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-config\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.697476 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.697592 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.697645 4871 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.698263 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/25606939-d595-4bfc-aead-c40883fdae31-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.698320 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grwwg\" (UniqueName: \"kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-kube-api-access-grwwg\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.799563 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.799999 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/25606939-d595-4bfc-aead-c40883fdae31-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.800028 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-config\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.800050 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.800087 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.800111 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc 
kubenswrapper[4871]: I1126 05:41:38.800142 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/25606939-d595-4bfc-aead-c40883fdae31-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.800171 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grwwg\" (UniqueName: \"kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-kube-api-access-grwwg\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.807461 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/25606939-d595-4bfc-aead-c40883fdae31-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.808596 4871 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.808644 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/411f5d2e0132cbadcdbc80898abccb6eaaa272fad7576dd15cdb4f42514f558a/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.818036 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-config\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.818143 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.818156 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.823446 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 
26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.830340 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grwwg\" (UniqueName: \"kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-kube-api-access-grwwg\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.839517 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/25606939-d595-4bfc-aead-c40883fdae31-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.862082 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:38 crc kubenswrapper[4871]: I1126 05:41:38.929175 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 05:41:39 crc kubenswrapper[4871]: I1126 05:41:39.665789 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 05:41:40 crc kubenswrapper[4871]: I1126 05:41:40.564248 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"25606939-d595-4bfc-aead-c40883fdae31","Type":"ContainerStarted","Data":"60ba71b9378e1b112d33eaa60fc405b0daad5296a45194e8ce4fd351c000d16d"} Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.296951 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-m255d"] Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.298111 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.302315 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.302417 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.302777 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-lv2x2" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.309568 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-m255d"] Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.399000 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-t9t82"] Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.402968 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.404457 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-t9t82"] Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.449678 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de8a947b-6c51-4c33-b221-ea16d851bafb-var-run-ovn\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.449737 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de8a947b-6c51-4c33-b221-ea16d851bafb-var-log-ovn\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.449789 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/de8a947b-6c51-4c33-b221-ea16d851bafb-ovn-controller-tls-certs\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.449810 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de8a947b-6c51-4c33-b221-ea16d851bafb-scripts\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.449883 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de8a947b-6c51-4c33-b221-ea16d851bafb-var-run\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.449901 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8a947b-6c51-4c33-b221-ea16d851bafb-combined-ca-bundle\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.449929 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7mz9\" (UniqueName: \"kubernetes.io/projected/de8a947b-6c51-4c33-b221-ea16d851bafb-kube-api-access-c7mz9\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.551900 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/de8a947b-6c51-4c33-b221-ea16d851bafb-ovn-controller-tls-certs\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.551957 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-var-run\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.551986 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de8a947b-6c51-4c33-b221-ea16d851bafb-scripts\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.552025 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-var-lib\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.552047 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-var-log\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.552071 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de8a947b-6c51-4c33-b221-ea16d851bafb-var-run\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.552097 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8a947b-6c51-4c33-b221-ea16d851bafb-combined-ca-bundle\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.552120 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-etc-ovs\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.552159 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7mz9\" (UniqueName: \"kubernetes.io/projected/de8a947b-6c51-4c33-b221-ea16d851bafb-kube-api-access-c7mz9\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.552184 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jd9bx\" (UniqueName: \"kubernetes.io/projected/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-kube-api-access-jd9bx\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.552216 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-scripts\") pod \"ovn-controller-ovs-t9t82\" (UID: 
\"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.552275 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de8a947b-6c51-4c33-b221-ea16d851bafb-var-run-ovn\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.552315 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de8a947b-6c51-4c33-b221-ea16d851bafb-var-log-ovn\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.553047 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de8a947b-6c51-4c33-b221-ea16d851bafb-var-log-ovn\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.553271 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de8a947b-6c51-4c33-b221-ea16d851bafb-var-run\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.553297 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de8a947b-6c51-4c33-b221-ea16d851bafb-var-run-ovn\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.556243 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de8a947b-6c51-4c33-b221-ea16d851bafb-scripts\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.563119 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8a947b-6c51-4c33-b221-ea16d851bafb-combined-ca-bundle\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.563186 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/de8a947b-6c51-4c33-b221-ea16d851bafb-ovn-controller-tls-certs\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.573651 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7mz9\" (UniqueName: \"kubernetes.io/projected/de8a947b-6c51-4c33-b221-ea16d851bafb-kube-api-access-c7mz9\") pod \"ovn-controller-m255d\" (UID: \"de8a947b-6c51-4c33-b221-ea16d851bafb\") " pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.654633 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: 
\"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-var-lib\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.654684 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-var-log\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.654715 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-etc-ovs\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.654757 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jd9bx\" (UniqueName: \"kubernetes.io/projected/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-kube-api-access-jd9bx\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.654781 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-scripts\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.654884 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-var-run\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.655104 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-var-run\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.655581 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-var-lib\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.656060 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-var-log\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.656124 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-etc-ovs\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 
05:41:41.658252 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-scripts\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.676878 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-m255d" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.677465 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.679598 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.683105 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.683426 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.684095 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.684672 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-8qh9s" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.684780 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.685673 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jd9bx\" (UniqueName: \"kubernetes.io/projected/9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e-kube-api-access-jd9bx\") pod \"ovn-controller-ovs-t9t82\" (UID: \"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e\") " pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.693438 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.717958 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.861065 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.861136 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.861157 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-config\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.861203 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.862137 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.862162 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.862191 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.862213 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76rc4\" (UniqueName: \"kubernetes.io/projected/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-kube-api-access-76rc4\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.963357 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.963421 4871 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.963441 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-config\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.963497 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.963538 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.963562 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.963588 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.963609 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76rc4\" (UniqueName: \"kubernetes.io/projected/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-kube-api-access-76rc4\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.964273 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.964484 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-config\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.964911 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") device mount path \"/mnt/openstack/pv04\"" 
pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.965426 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.979670 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:41 crc kubenswrapper[4871]: I1126 05:41:41.986593 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:42 crc kubenswrapper[4871]: I1126 05:41:42.015564 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:42 crc kubenswrapper[4871]: I1126 05:41:42.024929 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76rc4\" (UniqueName: \"kubernetes.io/projected/df0ee863-8fbb-4a6e-86e3-8d56cf38da47-kube-api-access-76rc4\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:42 crc kubenswrapper[4871]: I1126 05:41:42.102091 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"df0ee863-8fbb-4a6e-86e3-8d56cf38da47\") " pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:42 crc kubenswrapper[4871]: I1126 05:41:42.326303 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 05:41:42 crc kubenswrapper[4871]: I1126 05:41:42.395955 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-m255d"] Nov 26 05:41:42 crc kubenswrapper[4871]: W1126 05:41:42.402045 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde8a947b_6c51_4c33_b221_ea16d851bafb.slice/crio-3612b5c63850fe43c6a53d567f0fc1e0f7ab66d4bfe2ae436a4b4662ddbd7ebf WatchSource:0}: Error finding container 3612b5c63850fe43c6a53d567f0fc1e0f7ab66d4bfe2ae436a4b4662ddbd7ebf: Status 404 returned error can't find the container with id 3612b5c63850fe43c6a53d567f0fc1e0f7ab66d4bfe2ae436a4b4662ddbd7ebf Nov 26 05:41:42 crc kubenswrapper[4871]: I1126 05:41:42.615597 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-m255d" event={"ID":"de8a947b-6c51-4c33-b221-ea16d851bafb","Type":"ContainerStarted","Data":"3612b5c63850fe43c6a53d567f0fc1e0f7ab66d4bfe2ae436a4b4662ddbd7ebf"} Nov 26 05:41:42 crc kubenswrapper[4871]: I1126 05:41:42.624317 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"44a0ad7f-f13b-492a-914f-359b86e8be85","Type":"ContainerStarted","Data":"cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd"} Nov 26 05:41:42 crc kubenswrapper[4871]: I1126 05:41:42.748753 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-t9t82"] Nov 26 05:41:42 crc kubenswrapper[4871]: I1126 05:41:42.757020 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.4975085200000002 podStartE2EDuration="5.757005149s" podCreationTimestamp="2025-11-26 05:41:37 +0000 UTC" firstStartedPulling="2025-11-26 05:41:38.250182418 +0000 UTC m=+956.433234004" lastFinishedPulling="2025-11-26 05:41:41.509679027 +0000 UTC m=+959.692730633" observedRunningTime="2025-11-26 05:41:42.723166202 +0000 UTC m=+960.906217788" watchObservedRunningTime="2025-11-26 05:41:42.757005149 +0000 UTC m=+960.940056735" Nov 26 05:41:42 crc kubenswrapper[4871]: W1126 05:41:42.758661 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a7a8e3f_4fa4_484d_8f81_63a8e9dab10e.slice/crio-423114140cb046e39e315f4ca790158b501076f9b6f9d99c0a29da3c7c418468 WatchSource:0}: Error finding container 423114140cb046e39e315f4ca790158b501076f9b6f9d99c0a29da3c7c418468: Status 404 returned error can't find the container with id 423114140cb046e39e315f4ca790158b501076f9b6f9d99c0a29da3c7c418468 Nov 26 05:41:42 crc kubenswrapper[4871]: I1126 05:41:42.914256 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.023941 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-wmd2n"] Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.025250 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.031106 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.038802 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wmd2n"] Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.102453 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-ovn-rundir\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.102790 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.102845 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-combined-ca-bundle\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.103007 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-ovs-rundir\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.103043 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nn6zk\" (UniqueName: \"kubernetes.io/projected/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-kube-api-access-nn6zk\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.103067 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-config\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.204262 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-ovs-rundir\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.204322 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nn6zk\" (UniqueName: \"kubernetes.io/projected/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-kube-api-access-nn6zk\") pod \"ovn-controller-metrics-wmd2n\" (UID: 
\"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.204344 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-config\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.204367 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-ovn-rundir\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.204395 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.204422 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-combined-ca-bundle\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.204694 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-ovn-rundir\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.205946 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-config\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.206025 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-ovs-rundir\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.222913 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nn6zk\" (UniqueName: \"kubernetes.io/projected/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-kube-api-access-nn6zk\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.223043 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 
05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.236433 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5449989c59-wmxsr"] Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.240047 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd-combined-ca-bundle\") pod \"ovn-controller-metrics-wmd2n\" (UID: \"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd\") " pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.280622 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6fb75c485f-9gd4f"] Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.282020 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.284080 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.317414 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6fb75c485f-9gd4f"] Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.372196 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wmd2n" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.407147 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98mg9\" (UniqueName: \"kubernetes.io/projected/05f4a358-58da-4fd9-a7d2-ef651ac303d7-kube-api-access-98mg9\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.407187 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-dns-svc\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.407212 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-config\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.407228 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-ovsdbserver-nb\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.509159 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98mg9\" (UniqueName: \"kubernetes.io/projected/05f4a358-58da-4fd9-a7d2-ef651ac303d7-kube-api-access-98mg9\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.509208 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-dns-svc\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.509233 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-config\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.509248 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-ovsdbserver-nb\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.510682 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-ovsdbserver-nb\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.511900 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-dns-svc\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.516915 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-config\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.539149 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98mg9\" (UniqueName: \"kubernetes.io/projected/05f4a358-58da-4fd9-a7d2-ef651ac303d7-kube-api-access-98mg9\") pod \"dnsmasq-dns-6fb75c485f-9gd4f\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.605087 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.636441 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-t9t82" event={"ID":"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e","Type":"ContainerStarted","Data":"423114140cb046e39e315f4ca790158b501076f9b6f9d99c0a29da3c7c418468"} Nov 26 05:41:43 crc kubenswrapper[4871]: I1126 05:41:43.636627 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 05:41:44 crc kubenswrapper[4871]: W1126 05:41:44.881016 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf0ee863_8fbb_4a6e_86e3_8d56cf38da47.slice/crio-0cb06557abc32f6219ab26e631c821dd8b01f67fdbc663ebd55d1ba7778fffd5 WatchSource:0}: Error finding container 0cb06557abc32f6219ab26e631c821dd8b01f67fdbc663ebd55d1ba7778fffd5: Status 404 returned error can't find the container with id 0cb06557abc32f6219ab26e631c821dd8b01f67fdbc663ebd55d1ba7778fffd5 Nov 26 05:41:44 crc kubenswrapper[4871]: I1126 05:41:44.917693 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 05:41:44 crc kubenswrapper[4871]: I1126 05:41:44.919249 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:44 crc kubenswrapper[4871]: I1126 05:41:44.933750 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-shhw2" Nov 26 05:41:44 crc kubenswrapper[4871]: I1126 05:41:44.933992 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 05:41:44 crc kubenswrapper[4871]: I1126 05:41:44.934225 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 26 05:41:44 crc kubenswrapper[4871]: I1126 05:41:44.934458 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 26 05:41:44 crc kubenswrapper[4871]: I1126 05:41:44.934606 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.034695 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd85545a-d991-4635-8d4b-2b81937e389f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.034739 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd85545a-d991-4635-8d4b-2b81937e389f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.034775 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd85545a-d991-4635-8d4b-2b81937e389f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.034801 4871 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd85545a-d991-4635-8d4b-2b81937e389f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.034852 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.034881 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bd85545a-d991-4635-8d4b-2b81937e389f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.034901 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxc5r\" (UniqueName: \"kubernetes.io/projected/bd85545a-d991-4635-8d4b-2b81937e389f-kube-api-access-gxc5r\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.034925 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd85545a-d991-4635-8d4b-2b81937e389f-config\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.137020 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd85545a-d991-4635-8d4b-2b81937e389f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.137060 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd85545a-d991-4635-8d4b-2b81937e389f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.137093 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd85545a-d991-4635-8d4b-2b81937e389f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.137117 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd85545a-d991-4635-8d4b-2b81937e389f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.137188 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: 
\"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.137211 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bd85545a-d991-4635-8d4b-2b81937e389f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.137230 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxc5r\" (UniqueName: \"kubernetes.io/projected/bd85545a-d991-4635-8d4b-2b81937e389f-kube-api-access-gxc5r\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.137255 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd85545a-d991-4635-8d4b-2b81937e389f-config\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.138008 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.138752 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bd85545a-d991-4635-8d4b-2b81937e389f-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.140279 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd85545a-d991-4635-8d4b-2b81937e389f-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.141464 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd85545a-d991-4635-8d4b-2b81937e389f-config\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.146322 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd85545a-d991-4635-8d4b-2b81937e389f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.168113 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd85545a-d991-4635-8d4b-2b81937e389f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.193841 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") 
pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.194962 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd85545a-d991-4635-8d4b-2b81937e389f-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.196195 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxc5r\" (UniqueName: \"kubernetes.io/projected/bd85545a-d991-4635-8d4b-2b81937e389f-kube-api-access-gxc5r\") pod \"ovsdbserver-sb-0\" (UID: \"bd85545a-d991-4635-8d4b-2b81937e389f\") " pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.275296 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 05:41:45 crc kubenswrapper[4871]: I1126 05:41:45.662041 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"df0ee863-8fbb-4a6e-86e3-8d56cf38da47","Type":"ContainerStarted","Data":"0cb06557abc32f6219ab26e631c821dd8b01f67fdbc663ebd55d1ba7778fffd5"} Nov 26 05:41:47 crc kubenswrapper[4871]: I1126 05:41:47.627586 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 05:41:59 crc kubenswrapper[4871]: E1126 05:41:59.583917 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = reading blob sha256:6851ae3ecb0a8ff571ff0f187ccd7f946fe98a816ae263d745038315b28ed393: Get \"https://quay.rdoproject.org/v2/podified-master-centos10/openstack-ovn-controller/blobs/sha256:6851ae3ecb0a8ff571ff0f187ccd7f946fe98a816ae263d745038315b28ed393\": context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ovn-controller:current" Nov 26 05:41:59 crc kubenswrapper[4871]: E1126 05:41:59.585071 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = reading blob sha256:6851ae3ecb0a8ff571ff0f187ccd7f946fe98a816ae263d745038315b28ed393: Get \"https://quay.rdoproject.org/v2/podified-master-centos10/openstack-ovn-controller/blobs/sha256:6851ae3ecb0a8ff571ff0f187ccd7f946fe98a816ae263d745038315b28ed393\": context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ovn-controller:current" Nov 26 05:41:59 crc kubenswrapper[4871]: E1126 05:41:59.585366 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-controller,Image:quay.rdoproject.org/podified-master-centos10/openstack-ovn-controller:current,Command:[ovn-controller --pidfile unix:/run/openvswitch/db.sock --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key 
--ca-cert=/etc/pki/tls/certs/ovndbca.crt],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n676hc6h677h5bdh5f5h694h54chbdh659h64ch544h5dbh577hd5h675h5d4h4h5fhch56ch6ch558h64ch69h6ch76h56bh575h67h5d6h56ch55cq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run-ovn,ReadOnly:false,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log-ovn,ReadOnly:false,MountPath:/var/log/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c7mz9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_liveness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_readiness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/share/ovn/scripts/ovn-ctl stop_controller],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-m255d_openstack(de8a947b-6c51-4c33-b221-ea16d851bafb): ErrImagePull: rpc error: code = Canceled desc = reading blob sha256:6851ae3ecb0a8ff571ff0f187ccd7f946fe98a816ae263d745038315b28ed393: Get 
\"https://quay.rdoproject.org/v2/podified-master-centos10/openstack-ovn-controller/blobs/sha256:6851ae3ecb0a8ff571ff0f187ccd7f946fe98a816ae263d745038315b28ed393\": context canceled" logger="UnhandledError" Nov 26 05:41:59 crc kubenswrapper[4871]: E1126 05:41:59.587628 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ErrImagePull: \"rpc error: code = Canceled desc = reading blob sha256:6851ae3ecb0a8ff571ff0f187ccd7f946fe98a816ae263d745038315b28ed393: Get \\\"https://quay.rdoproject.org/v2/podified-master-centos10/openstack-ovn-controller/blobs/sha256:6851ae3ecb0a8ff571ff0f187ccd7f946fe98a816ae263d745038315b28ed393\\\": context canceled\"" pod="openstack/ovn-controller-m255d" podUID="de8a947b-6c51-4c33-b221-ea16d851bafb" Nov 26 05:41:59 crc kubenswrapper[4871]: E1126 05:41:59.787429 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ovn-controller:current\\\"\"" pod="openstack/ovn-controller-m255d" podUID="de8a947b-6c51-4c33-b221-ea16d851bafb" Nov 26 05:42:08 crc kubenswrapper[4871]: E1126 05:42:08.818759 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-mariadb:current" Nov 26 05:42:08 crc kubenswrapper[4871]: E1126 05:42:08.819613 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-mariadb:current" Nov 26 05:42:08 crc kubenswrapper[4871]: E1126 05:42:08.819782 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.rdoproject.org/podified-master-centos10/openstack-mariadb:current,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l5wr7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(fef4681d-3f18-4ed5-b251-92f53274dacd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:42:08 crc kubenswrapper[4871]: E1126 05:42:08.821077 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="fef4681d-3f18-4ed5-b251-92f53274dacd" Nov 26 05:42:08 crc kubenswrapper[4871]: E1126 05:42:08.871331 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-mariadb:current\\\"\"" pod="openstack/openstack-galera-0" podUID="fef4681d-3f18-4ed5-b251-92f53274dacd" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.047193 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.047248 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.047616 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:setup-container,Image:quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kfbqb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(b3f9dfba-a3a9-45ef-a96c-91c654671b97): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.048768 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.111541 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.111586 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: 
code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.111690 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jmlpm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-notifications-server-0_openstack(7df95f1b-7a5b-445e-bb56-b17695a0bde9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.111930 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.111944 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.112003 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cx7rd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(4ba97673-d74c-47df-acae-f2dcc1ed10df): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.113115 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="4ba97673-d74c-47df-acae-f2dcc1ed10df" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.113153 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: 
\"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-notifications-server-0" podUID="7df95f1b-7a5b-445e-bb56-b17695a0bde9" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.123775 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-mariadb:current" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.123829 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-mariadb:current" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.123966 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.rdoproject.org/podified-master-centos10/openstack-mariadb:current,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5m69x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(1a6ce456-795f-4bf1-bab9-f5de7cfd7abe): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.125687 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="1a6ce456-795f-4bf1-bab9-f5de7cfd7abe" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.691151 4871 log.go:32] "PullImage from image service failed" err="rpc 
error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-memcached:current" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.691471 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-memcached:current" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.691657 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.rdoproject.org/podified-master-centos10/openstack-memcached:current,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n65bh87hc8h5bch88h688h57h75h579h68dh5c7h5bch67fh7h87h574h54ch597h5f4h659h64fhbchd5h5c9h578h9fh54ch658h68bh598h9bh98q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bh4rq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 
},Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(2757b1a6-7b8f-4008-8a08-96985496ec1a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.692875 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="2757b1a6-7b8f-4008-8a08-96985496ec1a" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.893676 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current\\\"\"" pod="openstack/rabbitmq-server-0" podUID="4ba97673-d74c-47df-acae-f2dcc1ed10df" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.893860 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.893877 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-mariadb:current\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="1a6ce456-795f-4bf1-bab9-f5de7cfd7abe" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.893841 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-rabbitmq:current\\\"\"" pod="openstack/rabbitmq-notifications-server-0" podUID="7df95f1b-7a5b-445e-bb56-b17695a0bde9" Nov 26 05:42:10 crc kubenswrapper[4871]: E1126 05:42:10.893722 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-memcached:current\\\"\"" pod="openstack/memcached-0" podUID="2757b1a6-7b8f-4008-8a08-96985496ec1a" Nov 26 05:42:11 crc kubenswrapper[4871]: I1126 05:42:11.093324 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6fb75c485f-9gd4f"] Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.593543 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: 
context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ovn-base:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.594559 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ovn-base:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.594863 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:ovsdb-server-init,Image:quay.rdoproject.org/podified-master-centos10/openstack-ovn-base:current,Command:[/usr/local/bin/container-scripts/init-ovsdb-server.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n676hc6h677h5bdh5f5h694h54chbdh659h64ch544h5dbh577hd5h675h5d4h4h5fhch56ch6ch558h64ch69h6ch76h56bh575h67h5d6h56ch55cq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-ovs,ReadOnly:false,MountPath:/etc/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log,ReadOnly:false,MountPath:/var/log/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-lib,ReadOnly:false,MountPath:/var/lib/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jd9bx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-ovs-t9t82_openstack(9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.596151 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-ovs-t9t82" podUID="9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.643375 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.643728 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: 
context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.643881 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wsxlc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-545d49fd5c-jn2t4_openstack(31249d26-77aa-43b4-b6a2-6152cf3bf986): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.645216 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" podUID="31249d26-77aa-43b4-b6a2-6152cf3bf986" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.669832 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.669893 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.670030 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init 
container &Container{Name:init,Image:quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mdqjx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-86b8f4ff9-gzswq_openstack(c81cfa5f-8875-4d7d-ad9e-5c22439a7820): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.671368 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" podUID="c81cfa5f-8875-4d7d-ad9e-5c22439a7820" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.693146 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.693195 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.693301 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts 
--keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mcd92,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-8468885bfc-5xcmc_openstack(0dcea5db-05f7-4743-a5d6-0444dc2d2b6e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.694519 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" podUID="0dcea5db-05f7-4743-a5d6-0444dc2d2b6e" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.726235 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.726290 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.726428 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.729806 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5449989c59-wmxsr" podUID="e2c68410-1c80-45dc-b1be-ed9307460cd8"
Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.734549 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current"
Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.734592 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current"
Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.734727 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hkxxz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-b9b4959cc-mjnvz_openstack(cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.735916 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" podUID="cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e"
Nov 26 05:42:12 crc kubenswrapper[4871]: I1126 05:42:12.915978 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" event={"ID":"05f4a358-58da-4fd9-a7d2-ef651ac303d7","Type":"ContainerStarted","Data":"dd32febae17da52da97b9fb33977056bf77afeecf02ed4c7953e59706e22a04c"}
Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.918035 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-neutron-server:current\\\"\"" pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" podUID="cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e"
Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.918440 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ovn-base:current\\\"\"" pod="openstack/ovn-controller-ovs-t9t82" podUID="9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e"
Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.990977 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ovn-nb-db-server:current"
image="quay.rdoproject.org/podified-master-centos10/openstack-ovn-nb-db-server:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.991029 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ovn-nb-db-server:current" Nov 26 05:42:12 crc kubenswrapper[4871]: E1126 05:42:12.991190 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovsdbserver-nb,Image:quay.rdoproject.org/podified-master-centos10/openstack-ovn-nb-db-server:current,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nch666h5cfh7hbbh575h676h5dbh559hd8hdhffh577hdbh546h594hbch67bh5d6hffh5b5h86h64bh88h5bch56bh68h9bh99h98h6chd5q,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-nb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-76rc4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof 
Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.097828 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wmd2n"]
Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.198390 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4"
Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.296030 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-config\") pod \"31249d26-77aa-43b4-b6a2-6152cf3bf986\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") "
Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.296160 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsxlc\" (UniqueName: \"kubernetes.io/projected/31249d26-77aa-43b4-b6a2-6152cf3bf986-kube-api-access-wsxlc\") pod \"31249d26-77aa-43b4-b6a2-6152cf3bf986\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") "
Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.296212 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-dns-svc\") pod \"31249d26-77aa-43b4-b6a2-6152cf3bf986\" (UID: \"31249d26-77aa-43b4-b6a2-6152cf3bf986\") "
Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.296962 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-config" (OuterVolumeSpecName: "config") pod "31249d26-77aa-43b4-b6a2-6152cf3bf986" (UID: "31249d26-77aa-43b4-b6a2-6152cf3bf986"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.297866 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "31249d26-77aa-43b4-b6a2-6152cf3bf986" (UID: "31249d26-77aa-43b4-b6a2-6152cf3bf986"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.305574 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31249d26-77aa-43b4-b6a2-6152cf3bf986-kube-api-access-wsxlc" (OuterVolumeSpecName: "kube-api-access-wsxlc") pod "31249d26-77aa-43b4-b6a2-6152cf3bf986" (UID: "31249d26-77aa-43b4-b6a2-6152cf3bf986"). InnerVolumeSpecName "kube-api-access-wsxlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.322491 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.398339 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-config\") pod \"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e\" (UID: \"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e\") " Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.398814 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcd92\" (UniqueName: \"kubernetes.io/projected/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-kube-api-access-mcd92\") pod \"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e\" (UID: \"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e\") " Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.399105 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsxlc\" (UniqueName: \"kubernetes.io/projected/31249d26-77aa-43b4-b6a2-6152cf3bf986-kube-api-access-wsxlc\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.399119 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.399128 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31249d26-77aa-43b4-b6a2-6152cf3bf986-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.399361 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-config" (OuterVolumeSpecName: "config") pod "0dcea5db-05f7-4743-a5d6-0444dc2d2b6e" (UID: "0dcea5db-05f7-4743-a5d6-0444dc2d2b6e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.402069 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-kube-api-access-mcd92" (OuterVolumeSpecName: "kube-api-access-mcd92") pod "0dcea5db-05f7-4743-a5d6-0444dc2d2b6e" (UID: "0dcea5db-05f7-4743-a5d6-0444dc2d2b6e"). InnerVolumeSpecName "kube-api-access-mcd92". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.430854 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.434625 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.500192 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-config\") pod \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.500252 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-dns-svc\") pod \"e2c68410-1c80-45dc-b1be-ed9307460cd8\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.500279 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-dns-svc\") pod \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.500376 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwnhb\" (UniqueName: \"kubernetes.io/projected/e2c68410-1c80-45dc-b1be-ed9307460cd8-kube-api-access-mwnhb\") pod \"e2c68410-1c80-45dc-b1be-ed9307460cd8\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.500451 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdqjx\" (UniqueName: \"kubernetes.io/projected/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-kube-api-access-mdqjx\") pod \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\" (UID: \"c81cfa5f-8875-4d7d-ad9e-5c22439a7820\") " Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.500480 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-config\") pod \"e2c68410-1c80-45dc-b1be-ed9307460cd8\" (UID: \"e2c68410-1c80-45dc-b1be-ed9307460cd8\") " Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.500666 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-config" (OuterVolumeSpecName: "config") pod "c81cfa5f-8875-4d7d-ad9e-5c22439a7820" (UID: "c81cfa5f-8875-4d7d-ad9e-5c22439a7820"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.500704 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e2c68410-1c80-45dc-b1be-ed9307460cd8" (UID: "e2c68410-1c80-45dc-b1be-ed9307460cd8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.500887 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c81cfa5f-8875-4d7d-ad9e-5c22439a7820" (UID: "c81cfa5f-8875-4d7d-ad9e-5c22439a7820"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.500941 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcd92\" (UniqueName: \"kubernetes.io/projected/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-kube-api-access-mcd92\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.501074 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.501074 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-config" (OuterVolumeSpecName: "config") pod "e2c68410-1c80-45dc-b1be-ed9307460cd8" (UID: "e2c68410-1c80-45dc-b1be-ed9307460cd8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.501094 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.501109 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.503757 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-kube-api-access-mdqjx" (OuterVolumeSpecName: "kube-api-access-mdqjx") pod "c81cfa5f-8875-4d7d-ad9e-5c22439a7820" (UID: "c81cfa5f-8875-4d7d-ad9e-5c22439a7820"). InnerVolumeSpecName "kube-api-access-mdqjx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.504022 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2c68410-1c80-45dc-b1be-ed9307460cd8-kube-api-access-mwnhb" (OuterVolumeSpecName: "kube-api-access-mwnhb") pod "e2c68410-1c80-45dc-b1be-ed9307460cd8" (UID: "e2c68410-1c80-45dc-b1be-ed9307460cd8"). InnerVolumeSpecName "kube-api-access-mwnhb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.602866 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdqjx\" (UniqueName: \"kubernetes.io/projected/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-kube-api-access-mdqjx\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.602907 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2c68410-1c80-45dc-b1be-ed9307460cd8-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.602921 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c81cfa5f-8875-4d7d-ad9e-5c22439a7820-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.602934 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwnhb\" (UniqueName: \"kubernetes.io/projected/e2c68410-1c80-45dc-b1be-ed9307460cd8-kube-api-access-mwnhb\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.932493 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.934589 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5449989c59-wmxsr" event={"ID":"e2c68410-1c80-45dc-b1be-ed9307460cd8","Type":"ContainerDied","Data":"cfc3a1057712b74a63c9b5e981b76251a1fe57756aaa32dfa6acaf7b00a06dc0"} Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.934652 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5449989c59-wmxsr" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.939398 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wmd2n" event={"ID":"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd","Type":"ContainerStarted","Data":"82c5f63792b510eba17a3979def232487b72854d8dbc69cd5230f0de9e70df70"} Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.940914 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" event={"ID":"31249d26-77aa-43b4-b6a2-6152cf3bf986","Type":"ContainerDied","Data":"cebcb9530d82b63601213f5031a4ddbefdbb56f4382dccee313c5564fee7cc2a"} Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.940968 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-545d49fd5c-jn2t4" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.944667 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.944664 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8468885bfc-5xcmc" event={"ID":"0dcea5db-05f7-4743-a5d6-0444dc2d2b6e","Type":"ContainerDied","Data":"777fc58572517fab082d875a97c729ccab8bdb83104577608628bb2ec3cb6869"} Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.946152 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.946147 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86b8f4ff9-gzswq" event={"ID":"c81cfa5f-8875-4d7d-ad9e-5c22439a7820","Type":"ContainerDied","Data":"a32738a85d4a6d6e74998c7bd0a0e4a46d32baac03ee645f8de034cdc3309707"} Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.948760 4871 generic.go:334] "Generic (PLEG): container finished" podID="05f4a358-58da-4fd9-a7d2-ef651ac303d7" containerID="bde42051da0c37078bfa36b6d921d70e2b3efff9dbd824504696b223fc6853ec" exitCode=0 Nov 26 05:42:13 crc kubenswrapper[4871]: I1126 05:42:13.948798 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" event={"ID":"05f4a358-58da-4fd9-a7d2-ef651ac303d7","Type":"ContainerDied","Data":"bde42051da0c37078bfa36b6d921d70e2b3efff9dbd824504696b223fc6853ec"} Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.072070 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5449989c59-wmxsr"] Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.080479 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5449989c59-wmxsr"] Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.112150 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8468885bfc-5xcmc"] Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.120678 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8468885bfc-5xcmc"] Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.141367 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-545d49fd5c-jn2t4"] Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.150575 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-545d49fd5c-jn2t4"] Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.164973 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86b8f4ff9-gzswq"] Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.170288 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86b8f4ff9-gzswq"] Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.517008 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dcea5db-05f7-4743-a5d6-0444dc2d2b6e" path="/var/lib/kubelet/pods/0dcea5db-05f7-4743-a5d6-0444dc2d2b6e/volumes" Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.517829 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31249d26-77aa-43b4-b6a2-6152cf3bf986" path="/var/lib/kubelet/pods/31249d26-77aa-43b4-b6a2-6152cf3bf986/volumes" Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.518159 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c81cfa5f-8875-4d7d-ad9e-5c22439a7820" path="/var/lib/kubelet/pods/c81cfa5f-8875-4d7d-ad9e-5c22439a7820/volumes" Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.518473 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2c68410-1c80-45dc-b1be-ed9307460cd8" path="/var/lib/kubelet/pods/e2c68410-1c80-45dc-b1be-ed9307460cd8/volumes" Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.959604 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"bd85545a-d991-4635-8d4b-2b81937e389f","Type":"ContainerStarted","Data":"b192b3cc3b20b5aeb95687b606bae46459662d07f6559c6e976edbc104df44b1"} Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.961634 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" event={"ID":"05f4a358-58da-4fd9-a7d2-ef651ac303d7","Type":"ContainerStarted","Data":"a546588e7dbf307f85f20a818e564106a3991cd5ff24026d382eb20d880624cc"} Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.961749 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.963873 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-m255d" event={"ID":"de8a947b-6c51-4c33-b221-ea16d851bafb","Type":"ContainerStarted","Data":"d3e26038f493c9de37abdd9cb4f46a4f3ae7339328ca1807e641bbc770967690"} Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.964565 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-m255d" Nov 26 05:42:14 crc kubenswrapper[4871]: I1126 05:42:14.987758 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" podStartSLOduration=31.503059941 podStartE2EDuration="31.987739653s" podCreationTimestamp="2025-11-26 05:41:43 +0000 UTC" firstStartedPulling="2025-11-26 05:42:12.631586365 +0000 UTC m=+990.814637951" lastFinishedPulling="2025-11-26 05:42:13.116266067 +0000 UTC m=+991.299317663" observedRunningTime="2025-11-26 05:42:14.982279899 +0000 UTC m=+993.165331505" watchObservedRunningTime="2025-11-26 05:42:14.987739653 +0000 UTC m=+993.170791229" Nov 26 05:42:15 crc kubenswrapper[4871]: I1126 05:42:15.001161 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-m255d" podStartSLOduration=2.596276715 podStartE2EDuration="34.001144822s" podCreationTimestamp="2025-11-26 05:41:41 +0000 UTC" firstStartedPulling="2025-11-26 05:41:42.406799541 +0000 UTC m=+960.589851127" lastFinishedPulling="2025-11-26 05:42:13.811667638 +0000 UTC m=+991.994719234" observedRunningTime="2025-11-26 05:42:14.999298066 +0000 UTC m=+993.182349652" watchObservedRunningTime="2025-11-26 05:42:15.001144822 +0000 UTC m=+993.184196408" Nov 26 05:42:15 crc kubenswrapper[4871]: I1126 05:42:15.972184 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"25606939-d595-4bfc-aead-c40883fdae31","Type":"ContainerStarted","Data":"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f"} Nov 26 05:42:15 crc kubenswrapper[4871]: I1126 05:42:15.973826 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"bd85545a-d991-4635-8d4b-2b81937e389f","Type":"ContainerStarted","Data":"7773cb577ee1dd82a739105a161bb93eee0559c941d84e74b3e3461604bcd1db"} Nov 26 05:42:17 crc kubenswrapper[4871]: E1126 05:42:17.246080 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" podUID="df0ee863-8fbb-4a6e-86e3-8d56cf38da47" Nov 26 05:42:17 crc kubenswrapper[4871]: I1126 05:42:17.992858 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"bd85545a-d991-4635-8d4b-2b81937e389f","Type":"ContainerStarted","Data":"f9374e3d87dbe07ee244746c103107757bb8c328939734cae08482c1e2b00a3a"} Nov 26 05:42:17 crc kubenswrapper[4871]: I1126 05:42:17.995125 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"df0ee863-8fbb-4a6e-86e3-8d56cf38da47","Type":"ContainerStarted","Data":"2fa066a1027d97c2299d7a026a81950dc73281e7e75e3668218850af3e440c76"} Nov 26 05:42:18 crc kubenswrapper[4871]: E1126 05:42:17.996781 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ovn-nb-db-server:current\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="df0ee863-8fbb-4a6e-86e3-8d56cf38da47" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.002380 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wmd2n" event={"ID":"d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd","Type":"ContainerStarted","Data":"505050ad2c93b178e264b1757a0f3592ef6775246753d38c8ecbdd1b5bba5e48"} Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.021500 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=32.133744725 podStartE2EDuration="35.021477993s" podCreationTimestamp="2025-11-26 05:41:43 +0000 UTC" firstStartedPulling="2025-11-26 05:42:13.976327838 +0000 UTC m=+992.159379424" lastFinishedPulling="2025-11-26 05:42:16.864061106 +0000 UTC m=+995.047112692" observedRunningTime="2025-11-26 05:42:18.01364038 +0000 UTC m=+996.196691966" watchObservedRunningTime="2025-11-26 05:42:18.021477993 +0000 UTC m=+996.204529589" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.060751 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-wmd2n" podStartSLOduration=32.34239268 podStartE2EDuration="36.060726326s" podCreationTimestamp="2025-11-26 05:41:42 +0000 UTC" firstStartedPulling="2025-11-26 05:42:13.149494352 +0000 UTC m=+991.332545938" lastFinishedPulling="2025-11-26 05:42:16.867827998 +0000 UTC m=+995.050879584" observedRunningTime="2025-11-26 05:42:18.058517451 +0000 UTC m=+996.241569047" watchObservedRunningTime="2025-11-26 05:42:18.060726326 +0000 UTC m=+996.243777912" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.276119 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.348225 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.575395 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b9b4959cc-mjnvz"] Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.582429 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6dbf544cc9-jksqz"] Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.585018 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.588027 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.588208 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6dbf544cc9-jksqz"] Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.694181 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87qhv\" (UniqueName: \"kubernetes.io/projected/45374a1a-cee6-4d53-8643-e185d317425c-kube-api-access-87qhv\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.694237 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-config\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.694259 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-sb\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.694414 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-nb\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.694559 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-dns-svc\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.795867 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-dns-svc\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.796245 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87qhv\" (UniqueName: \"kubernetes.io/projected/45374a1a-cee6-4d53-8643-e185d317425c-kube-api-access-87qhv\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.796268 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-config\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " 
pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.796285 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-sb\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.796323 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-nb\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.797158 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-dns-svc\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.797301 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-nb\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.797315 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-config\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.797357 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-sb\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.819865 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87qhv\" (UniqueName: \"kubernetes.io/projected/45374a1a-cee6-4d53-8643-e185d317425c-kube-api-access-87qhv\") pod \"dnsmasq-dns-6dbf544cc9-jksqz\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.878229 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:42:18 crc kubenswrapper[4871]: I1126 05:42:18.910568 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.007407 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-config\") pod \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.007484 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-dns-svc\") pod \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.007695 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkxxz\" (UniqueName: \"kubernetes.io/projected/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-kube-api-access-hkxxz\") pod \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\" (UID: \"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e\") " Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.009425 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e" (UID: "cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.009839 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-config" (OuterVolumeSpecName: "config") pod "cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e" (UID: "cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.028328 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-kube-api-access-hkxxz" (OuterVolumeSpecName: "kube-api-access-hkxxz") pod "cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e" (UID: "cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e"). InnerVolumeSpecName "kube-api-access-hkxxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.039021 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" event={"ID":"cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e","Type":"ContainerDied","Data":"b3629a032cb700ee846eda08bf5bc5e343963dc4819b359d87124f896168ebf7"} Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.039188 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b9b4959cc-mjnvz" Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.039655 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 26 05:42:19 crc kubenswrapper[4871]: E1126 05:42:19.041119 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ovn-nb-db-server:current\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="df0ee863-8fbb-4a6e-86e3-8d56cf38da47" Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.108764 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b9b4959cc-mjnvz"] Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.111010 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkxxz\" (UniqueName: \"kubernetes.io/projected/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-kube-api-access-hkxxz\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.111045 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.111059 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb9b3c97-7742-4e8d-b3ec-a17c8ae67d3e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.121827 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b9b4959cc-mjnvz"] Nov 26 05:42:19 crc kubenswrapper[4871]: I1126 05:42:19.366666 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6dbf544cc9-jksqz"] Nov 26 05:42:19 crc kubenswrapper[4871]: W1126 05:42:19.376638 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45374a1a_cee6_4d53_8643_e185d317425c.slice/crio-4b93502a10967a0932b4359ea76cba35182f47a0a22d86eab0053b65b4b5e525 WatchSource:0}: Error finding container 4b93502a10967a0932b4359ea76cba35182f47a0a22d86eab0053b65b4b5e525: Status 404 returned error can't find the container with id 4b93502a10967a0932b4359ea76cba35182f47a0a22d86eab0053b65b4b5e525 Nov 26 05:42:20 crc kubenswrapper[4871]: I1126 05:42:20.053318 4871 generic.go:334] "Generic (PLEG): container finished" podID="45374a1a-cee6-4d53-8643-e185d317425c" containerID="fa1e052a86550a87879cd7603a6d28c7911cb9d22b21c4d059d3d0f3c9bbefe1" exitCode=0 Nov 26 05:42:20 crc kubenswrapper[4871]: I1126 05:42:20.053399 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" event={"ID":"45374a1a-cee6-4d53-8643-e185d317425c","Type":"ContainerDied","Data":"fa1e052a86550a87879cd7603a6d28c7911cb9d22b21c4d059d3d0f3c9bbefe1"} Nov 26 05:42:20 crc kubenswrapper[4871]: I1126 05:42:20.053880 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" event={"ID":"45374a1a-cee6-4d53-8643-e185d317425c","Type":"ContainerStarted","Data":"4b93502a10967a0932b4359ea76cba35182f47a0a22d86eab0053b65b4b5e525"} Nov 26 05:42:20 crc kubenswrapper[4871]: I1126 05:42:20.125519 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 26 05:42:20 crc kubenswrapper[4871]: I1126 
Nov 26 05:42:21 crc kubenswrapper[4871]: I1126 05:42:21.063123 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"fef4681d-3f18-4ed5-b251-92f53274dacd","Type":"ContainerStarted","Data":"8422d0887440987364187bf134b6c949d8511ecd00492cbdc7810c5fb2dc1945"}
Nov 26 05:42:21 crc kubenswrapper[4871]: I1126 05:42:21.064823 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" event={"ID":"45374a1a-cee6-4d53-8643-e185d317425c","Type":"ContainerStarted","Data":"49f07507965162e0c729182081ec6ddd43e73e586894f8531b291f7a63fea130"}
Nov 26 05:42:21 crc kubenswrapper[4871]: I1126 05:42:21.121605 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" podStartSLOduration=3.121585931 podStartE2EDuration="3.121585931s" podCreationTimestamp="2025-11-26 05:42:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:42:21.11460968 +0000 UTC m=+999.297661266" watchObservedRunningTime="2025-11-26 05:42:21.121585931 +0000 UTC m=+999.304637517"
Nov 26 05:42:22 crc kubenswrapper[4871]: I1126 05:42:22.070746 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz"
Nov 26 05:42:23 crc kubenswrapper[4871]: I1126 05:42:23.082875 4871 generic.go:334] "Generic (PLEG): container finished" podID="25606939-d595-4bfc-aead-c40883fdae31" containerID="b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f" exitCode=0
Nov 26 05:42:23 crc kubenswrapper[4871]: I1126 05:42:23.083587 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"25606939-d595-4bfc-aead-c40883fdae31","Type":"ContainerDied","Data":"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f"}
Nov 26 05:42:23 crc kubenswrapper[4871]: I1126 05:42:23.606776 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f"
Nov 26 05:42:25 crc kubenswrapper[4871]: I1126 05:42:25.115029 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe","Type":"ContainerStarted","Data":"f1c5c848e7f79e84b1628ff1a929c4a766f69ad29223e1d254fc4640bade2c42"}
Nov 26 05:42:26 crc kubenswrapper[4871]: I1126 05:42:26.130220 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"2757b1a6-7b8f-4008-8a08-96985496ec1a","Type":"ContainerStarted","Data":"27a2fb1e1378e5104187e62a082258188269a200829220be5b685e709ecdb140"}
Nov 26 05:42:26 crc kubenswrapper[4871]: I1126 05:42:26.130915 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Nov 26 05:42:26 crc kubenswrapper[4871]: I1126 05:42:26.136365 4871 generic.go:334] "Generic (PLEG): container finished" podID="fef4681d-3f18-4ed5-b251-92f53274dacd" containerID="8422d0887440987364187bf134b6c949d8511ecd00492cbdc7810c5fb2dc1945" exitCode=0
Nov 26 05:42:26 crc kubenswrapper[4871]: I1126 05:42:26.136408 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"fef4681d-3f18-4ed5-b251-92f53274dacd","Type":"ContainerDied","Data":"8422d0887440987364187bf134b6c949d8511ecd00492cbdc7810c5fb2dc1945"}
event={"ID":"fef4681d-3f18-4ed5-b251-92f53274dacd","Type":"ContainerDied","Data":"8422d0887440987364187bf134b6c949d8511ecd00492cbdc7810c5fb2dc1945"} Nov 26 05:42:26 crc kubenswrapper[4871]: I1126 05:42:26.138502 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"7df95f1b-7a5b-445e-bb56-b17695a0bde9","Type":"ContainerStarted","Data":"79364121dfc1db6ae8045d060d96f8aa6dcc8b206d819a10ae064a1efe7325b7"} Nov 26 05:42:26 crc kubenswrapper[4871]: I1126 05:42:26.142187 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b3f9dfba-a3a9-45ef-a96c-91c654671b97","Type":"ContainerStarted","Data":"7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927"} Nov 26 05:42:26 crc kubenswrapper[4871]: I1126 05:42:26.157214 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=1.9182146260000001 podStartE2EDuration="51.157189856s" podCreationTimestamp="2025-11-26 05:41:35 +0000 UTC" firstStartedPulling="2025-11-26 05:41:36.46348364 +0000 UTC m=+954.646535226" lastFinishedPulling="2025-11-26 05:42:25.70245886 +0000 UTC m=+1003.885510456" observedRunningTime="2025-11-26 05:42:26.151822664 +0000 UTC m=+1004.334874260" watchObservedRunningTime="2025-11-26 05:42:26.157189856 +0000 UTC m=+1004.340241442" Nov 26 05:42:27 crc kubenswrapper[4871]: I1126 05:42:27.154332 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"fef4681d-3f18-4ed5-b251-92f53274dacd","Type":"ContainerStarted","Data":"bd1ea262d4b3968832d0006e2bdd789e320771ca8deccf0c0c26fcc615105ff7"} Nov 26 05:42:27 crc kubenswrapper[4871]: I1126 05:42:27.188004 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=9.190954938 podStartE2EDuration="55.187984186s" podCreationTimestamp="2025-11-26 05:41:32 +0000 UTC" firstStartedPulling="2025-11-26 05:41:34.690484144 +0000 UTC m=+952.873535740" lastFinishedPulling="2025-11-26 05:42:20.687513402 +0000 UTC m=+998.870564988" observedRunningTime="2025-11-26 05:42:27.178863562 +0000 UTC m=+1005.361915148" watchObservedRunningTime="2025-11-26 05:42:27.187984186 +0000 UTC m=+1005.371035772" Nov 26 05:42:28 crc kubenswrapper[4871]: I1126 05:42:28.161322 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-t9t82" event={"ID":"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e","Type":"ContainerStarted","Data":"bee411bb196e10664f85dd83c2688a2e532ad6adfe1afc6bfb11bd86c6cea226"} Nov 26 05:42:28 crc kubenswrapper[4871]: I1126 05:42:28.163590 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4ba97673-d74c-47df-acae-f2dcc1ed10df","Type":"ContainerStarted","Data":"d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760"} Nov 26 05:42:28 crc kubenswrapper[4871]: I1126 05:42:28.912681 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:28 crc kubenswrapper[4871]: I1126 05:42:28.966538 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6fb75c485f-9gd4f"] Nov 26 05:42:28 crc kubenswrapper[4871]: I1126 05:42:28.966763 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" podUID="05f4a358-58da-4fd9-a7d2-ef651ac303d7" containerName="dnsmasq-dns" 
containerID="cri-o://a546588e7dbf307f85f20a818e564106a3991cd5ff24026d382eb20d880624cc" gracePeriod=10 Nov 26 05:42:29 crc kubenswrapper[4871]: I1126 05:42:29.170651 4871 generic.go:334] "Generic (PLEG): container finished" podID="05f4a358-58da-4fd9-a7d2-ef651ac303d7" containerID="a546588e7dbf307f85f20a818e564106a3991cd5ff24026d382eb20d880624cc" exitCode=0 Nov 26 05:42:29 crc kubenswrapper[4871]: I1126 05:42:29.170728 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" event={"ID":"05f4a358-58da-4fd9-a7d2-ef651ac303d7","Type":"ContainerDied","Data":"a546588e7dbf307f85f20a818e564106a3991cd5ff24026d382eb20d880624cc"} Nov 26 05:42:29 crc kubenswrapper[4871]: I1126 05:42:29.175566 4871 generic.go:334] "Generic (PLEG): container finished" podID="1a6ce456-795f-4bf1-bab9-f5de7cfd7abe" containerID="f1c5c848e7f79e84b1628ff1a929c4a766f69ad29223e1d254fc4640bade2c42" exitCode=0 Nov 26 05:42:29 crc kubenswrapper[4871]: I1126 05:42:29.175883 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe","Type":"ContainerDied","Data":"f1c5c848e7f79e84b1628ff1a929c4a766f69ad29223e1d254fc4640bade2c42"} Nov 26 05:42:29 crc kubenswrapper[4871]: I1126 05:42:29.182136 4871 generic.go:334] "Generic (PLEG): container finished" podID="9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e" containerID="bee411bb196e10664f85dd83c2688a2e532ad6adfe1afc6bfb11bd86c6cea226" exitCode=0 Nov 26 05:42:29 crc kubenswrapper[4871]: I1126 05:42:29.182177 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-t9t82" event={"ID":"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e","Type":"ContainerDied","Data":"bee411bb196e10664f85dd83c2688a2e532ad6adfe1afc6bfb11bd86c6cea226"} Nov 26 05:42:30 crc kubenswrapper[4871]: I1126 05:42:30.950007 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.014397 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.120344 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-98mg9\" (UniqueName: \"kubernetes.io/projected/05f4a358-58da-4fd9-a7d2-ef651ac303d7-kube-api-access-98mg9\") pod \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.120392 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-ovsdbserver-nb\") pod \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.120638 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-dns-svc\") pod \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.120673 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-config\") pod \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\" (UID: \"05f4a358-58da-4fd9-a7d2-ef651ac303d7\") " Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.124751 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05f4a358-58da-4fd9-a7d2-ef651ac303d7-kube-api-access-98mg9" (OuterVolumeSpecName: "kube-api-access-98mg9") pod "05f4a358-58da-4fd9-a7d2-ef651ac303d7" (UID: "05f4a358-58da-4fd9-a7d2-ef651ac303d7"). InnerVolumeSpecName "kube-api-access-98mg9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.155304 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "05f4a358-58da-4fd9-a7d2-ef651ac303d7" (UID: "05f4a358-58da-4fd9-a7d2-ef651ac303d7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.159917 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "05f4a358-58da-4fd9-a7d2-ef651ac303d7" (UID: "05f4a358-58da-4fd9-a7d2-ef651ac303d7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.170087 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-config" (OuterVolumeSpecName: "config") pod "05f4a358-58da-4fd9-a7d2-ef651ac303d7" (UID: "05f4a358-58da-4fd9-a7d2-ef651ac303d7"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.207179 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" event={"ID":"05f4a358-58da-4fd9-a7d2-ef651ac303d7","Type":"ContainerDied","Data":"dd32febae17da52da97b9fb33977056bf77afeecf02ed4c7953e59706e22a04c"} Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.207238 4871 scope.go:117] "RemoveContainer" containerID="a546588e7dbf307f85f20a818e564106a3991cd5ff24026d382eb20d880624cc" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.207605 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6fb75c485f-9gd4f" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.222610 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1a6ce456-795f-4bf1-bab9-f5de7cfd7abe","Type":"ContainerStarted","Data":"64994b711e35c3f49041d4acd6e444f61f0f94c819062be96c084a77ef06858f"} Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.226687 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-t9t82" event={"ID":"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e","Type":"ContainerStarted","Data":"b540f9fd335841e956dd1d70fd589bb0f6269d4911c3b045daaa1469025730de"} Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.229364 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"25606939-d595-4bfc-aead-c40883fdae31","Type":"ContainerStarted","Data":"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a"} Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.230411 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.230445 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.230468 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-98mg9\" (UniqueName: \"kubernetes.io/projected/05f4a358-58da-4fd9-a7d2-ef651ac303d7-kube-api-access-98mg9\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.230485 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/05f4a358-58da-4fd9-a7d2-ef651ac303d7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.275629 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371979.579166 podStartE2EDuration="57.275609464s" podCreationTimestamp="2025-11-26 05:41:34 +0000 UTC" firstStartedPulling="2025-11-26 05:41:36.359158696 +0000 UTC m=+954.542210282" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:42:31.251183295 +0000 UTC m=+1009.434234881" watchObservedRunningTime="2025-11-26 05:42:31.275609464 +0000 UTC m=+1009.458661060" Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.277221 4871 scope.go:117] "RemoveContainer" containerID="bde42051da0c37078bfa36b6d921d70e2b3efff9dbd824504696b223fc6853ec" Nov 26 05:42:31 crc kubenswrapper[4871]: 
I1126 05:42:31.283753 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6fb75c485f-9gd4f"] Nov 26 05:42:31 crc kubenswrapper[4871]: I1126 05:42:31.290000 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6fb75c485f-9gd4f"] Nov 26 05:42:32 crc kubenswrapper[4871]: I1126 05:42:32.245208 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-t9t82" event={"ID":"9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e","Type":"ContainerStarted","Data":"5dca7a340d482f81d7364d78fc8a9775c363cb7c1f491a628730543cce7274ae"} Nov 26 05:42:32 crc kubenswrapper[4871]: I1126 05:42:32.245631 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:42:32 crc kubenswrapper[4871]: I1126 05:42:32.245664 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:42:32 crc kubenswrapper[4871]: I1126 05:42:32.269066 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-t9t82" podStartSLOduration=6.331638629 podStartE2EDuration="51.269047958s" podCreationTimestamp="2025-11-26 05:41:41 +0000 UTC" firstStartedPulling="2025-11-26 05:41:42.760632711 +0000 UTC m=+960.943684297" lastFinishedPulling="2025-11-26 05:42:27.69804204 +0000 UTC m=+1005.881093626" observedRunningTime="2025-11-26 05:42:32.264900706 +0000 UTC m=+1010.447952302" watchObservedRunningTime="2025-11-26 05:42:32.269047958 +0000 UTC m=+1010.452099554" Nov 26 05:42:32 crc kubenswrapper[4871]: I1126 05:42:32.519824 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05f4a358-58da-4fd9-a7d2-ef651ac303d7" path="/var/lib/kubelet/pods/05f4a358-58da-4fd9-a7d2-ef651ac303d7/volumes" Nov 26 05:42:33 crc kubenswrapper[4871]: I1126 05:42:33.256437 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"df0ee863-8fbb-4a6e-86e3-8d56cf38da47","Type":"ContainerStarted","Data":"f8443926b1a806c742591a4b77bc2bacb9708fefbed24596fc5615c3c68eadf5"} Nov 26 05:42:33 crc kubenswrapper[4871]: I1126 05:42:33.274636 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=5.45719464 podStartE2EDuration="53.274610939s" podCreationTimestamp="2025-11-26 05:41:40 +0000 UTC" firstStartedPulling="2025-11-26 05:41:44.885070923 +0000 UTC m=+963.068122509" lastFinishedPulling="2025-11-26 05:42:32.702487212 +0000 UTC m=+1010.885538808" observedRunningTime="2025-11-26 05:42:33.273563013 +0000 UTC m=+1011.456614629" watchObservedRunningTime="2025-11-26 05:42:33.274610939 +0000 UTC m=+1011.457662545" Nov 26 05:42:33 crc kubenswrapper[4871]: I1126 05:42:33.327338 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 26 05:42:34 crc kubenswrapper[4871]: I1126 05:42:34.179984 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 26 05:42:34 crc kubenswrapper[4871]: I1126 05:42:34.180340 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 26 05:42:34 crc kubenswrapper[4871]: I1126 05:42:34.278285 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"25606939-d595-4bfc-aead-c40883fdae31","Type":"ContainerStarted","Data":"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983"} Nov 26 05:42:34 crc kubenswrapper[4871]: I1126 05:42:34.367944 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 26 05:42:34 crc kubenswrapper[4871]: I1126 05:42:34.504729 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.657564 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-3b84-account-create-update-wz5gq"] Nov 26 05:42:35 crc kubenswrapper[4871]: E1126 05:42:35.657863 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05f4a358-58da-4fd9-a7d2-ef651ac303d7" containerName="init" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.657875 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="05f4a358-58da-4fd9-a7d2-ef651ac303d7" containerName="init" Nov 26 05:42:35 crc kubenswrapper[4871]: E1126 05:42:35.657894 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05f4a358-58da-4fd9-a7d2-ef651ac303d7" containerName="dnsmasq-dns" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.657900 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="05f4a358-58da-4fd9-a7d2-ef651ac303d7" containerName="dnsmasq-dns" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.658036 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="05f4a358-58da-4fd9-a7d2-ef651ac303d7" containerName="dnsmasq-dns" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.658563 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3b84-account-create-update-wz5gq" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.661060 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.665335 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-r7k7z"] Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.667690 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-r7k7z" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.669866 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-3b84-account-create-update-wz5gq"] Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.696726 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-r7k7z"] Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.711644 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4p9wz\" (UniqueName: \"kubernetes.io/projected/a56ec863-f1f4-48f3-b167-54ec413401f1-kube-api-access-4p9wz\") pod \"keystone-3b84-account-create-update-wz5gq\" (UID: \"a56ec863-f1f4-48f3-b167-54ec413401f1\") " pod="openstack/keystone-3b84-account-create-update-wz5gq" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.711756 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a56ec863-f1f4-48f3-b167-54ec413401f1-operator-scripts\") pod \"keystone-3b84-account-create-update-wz5gq\" (UID: \"a56ec863-f1f4-48f3-b167-54ec413401f1\") " pod="openstack/keystone-3b84-account-create-update-wz5gq" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.812709 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-868zm\" (UniqueName: \"kubernetes.io/projected/9f197c15-87fa-40c6-9cbf-200c9746aba7-kube-api-access-868zm\") pod \"keystone-db-create-r7k7z\" (UID: \"9f197c15-87fa-40c6-9cbf-200c9746aba7\") " pod="openstack/keystone-db-create-r7k7z" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.812768 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a56ec863-f1f4-48f3-b167-54ec413401f1-operator-scripts\") pod \"keystone-3b84-account-create-update-wz5gq\" (UID: \"a56ec863-f1f4-48f3-b167-54ec413401f1\") " pod="openstack/keystone-3b84-account-create-update-wz5gq" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.812969 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4p9wz\" (UniqueName: \"kubernetes.io/projected/a56ec863-f1f4-48f3-b167-54ec413401f1-kube-api-access-4p9wz\") pod \"keystone-3b84-account-create-update-wz5gq\" (UID: \"a56ec863-f1f4-48f3-b167-54ec413401f1\") " pod="openstack/keystone-3b84-account-create-update-wz5gq" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.813115 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f197c15-87fa-40c6-9cbf-200c9746aba7-operator-scripts\") pod \"keystone-db-create-r7k7z\" (UID: \"9f197c15-87fa-40c6-9cbf-200c9746aba7\") " pod="openstack/keystone-db-create-r7k7z" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.813493 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a56ec863-f1f4-48f3-b167-54ec413401f1-operator-scripts\") pod \"keystone-3b84-account-create-update-wz5gq\" (UID: \"a56ec863-f1f4-48f3-b167-54ec413401f1\") " pod="openstack/keystone-3b84-account-create-update-wz5gq" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.832542 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4p9wz\" (UniqueName: 
\"kubernetes.io/projected/a56ec863-f1f4-48f3-b167-54ec413401f1-kube-api-access-4p9wz\") pod \"keystone-3b84-account-create-update-wz5gq\" (UID: \"a56ec863-f1f4-48f3-b167-54ec413401f1\") " pod="openstack/keystone-3b84-account-create-update-wz5gq" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.859927 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-btz92"] Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.861660 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-btz92" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.866419 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.866472 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.889341 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-btz92"] Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.915305 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f197c15-87fa-40c6-9cbf-200c9746aba7-operator-scripts\") pod \"keystone-db-create-r7k7z\" (UID: \"9f197c15-87fa-40c6-9cbf-200c9746aba7\") " pod="openstack/keystone-db-create-r7k7z" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.915699 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-868zm\" (UniqueName: \"kubernetes.io/projected/9f197c15-87fa-40c6-9cbf-200c9746aba7-kube-api-access-868zm\") pod \"keystone-db-create-r7k7z\" (UID: \"9f197c15-87fa-40c6-9cbf-200c9746aba7\") " pod="openstack/keystone-db-create-r7k7z" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.915851 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-operator-scripts\") pod \"placement-db-create-btz92\" (UID: \"f8e1692f-fcdb-4735-b4b4-904fb9c9da85\") " pod="openstack/placement-db-create-btz92" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.916079 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb97w\" (UniqueName: \"kubernetes.io/projected/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-kube-api-access-sb97w\") pod \"placement-db-create-btz92\" (UID: \"f8e1692f-fcdb-4735-b4b4-904fb9c9da85\") " pod="openstack/placement-db-create-btz92" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.916132 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f197c15-87fa-40c6-9cbf-200c9746aba7-operator-scripts\") pod \"keystone-db-create-r7k7z\" (UID: \"9f197c15-87fa-40c6-9cbf-200c9746aba7\") " pod="openstack/keystone-db-create-r7k7z" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.932564 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-868zm\" (UniqueName: \"kubernetes.io/projected/9f197c15-87fa-40c6-9cbf-200c9746aba7-kube-api-access-868zm\") pod \"keystone-db-create-r7k7z\" (UID: \"9f197c15-87fa-40c6-9cbf-200c9746aba7\") " pod="openstack/keystone-db-create-r7k7z" Nov 26 05:42:35 crc kubenswrapper[4871]: I1126 05:42:35.998884 4871 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/placement-5f39-account-create-update-wn6v7"] Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.000680 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5f39-account-create-update-wn6v7" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.002685 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3b84-account-create-update-wz5gq" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.003602 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.013408 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5f39-account-create-update-wn6v7"] Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.021903 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-r7k7z" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.028468 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-operator-scripts\") pod \"placement-db-create-btz92\" (UID: \"f8e1692f-fcdb-4735-b4b4-904fb9c9da85\") " pod="openstack/placement-db-create-btz92" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.028695 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb97w\" (UniqueName: \"kubernetes.io/projected/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-kube-api-access-sb97w\") pod \"placement-db-create-btz92\" (UID: \"f8e1692f-fcdb-4735-b4b4-904fb9c9da85\") " pod="openstack/placement-db-create-btz92" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.030778 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-operator-scripts\") pod \"placement-db-create-btz92\" (UID: \"f8e1692f-fcdb-4735-b4b4-904fb9c9da85\") " pod="openstack/placement-db-create-btz92" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.053090 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb97w\" (UniqueName: \"kubernetes.io/projected/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-kube-api-access-sb97w\") pod \"placement-db-create-btz92\" (UID: \"f8e1692f-fcdb-4735-b4b4-904fb9c9da85\") " pod="openstack/placement-db-create-btz92" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.130697 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dd7dl\" (UniqueName: \"kubernetes.io/projected/ef4d4490-6b6b-406b-b626-f135975b6e4a-kube-api-access-dd7dl\") pod \"placement-5f39-account-create-update-wn6v7\" (UID: \"ef4d4490-6b6b-406b-b626-f135975b6e4a\") " pod="openstack/placement-5f39-account-create-update-wn6v7" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.131065 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef4d4490-6b6b-406b-b626-f135975b6e4a-operator-scripts\") pod \"placement-5f39-account-create-update-wn6v7\" (UID: \"ef4d4490-6b6b-406b-b626-f135975b6e4a\") " pod="openstack/placement-5f39-account-create-update-wn6v7" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.202808 4871 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openstack/placement-db-create-btz92" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.233718 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dd7dl\" (UniqueName: \"kubernetes.io/projected/ef4d4490-6b6b-406b-b626-f135975b6e4a-kube-api-access-dd7dl\") pod \"placement-5f39-account-create-update-wn6v7\" (UID: \"ef4d4490-6b6b-406b-b626-f135975b6e4a\") " pod="openstack/placement-5f39-account-create-update-wn6v7" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.233792 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef4d4490-6b6b-406b-b626-f135975b6e4a-operator-scripts\") pod \"placement-5f39-account-create-update-wn6v7\" (UID: \"ef4d4490-6b6b-406b-b626-f135975b6e4a\") " pod="openstack/placement-5f39-account-create-update-wn6v7" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.234659 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef4d4490-6b6b-406b-b626-f135975b6e4a-operator-scripts\") pod \"placement-5f39-account-create-update-wn6v7\" (UID: \"ef4d4490-6b6b-406b-b626-f135975b6e4a\") " pod="openstack/placement-5f39-account-create-update-wn6v7" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.252846 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dd7dl\" (UniqueName: \"kubernetes.io/projected/ef4d4490-6b6b-406b-b626-f135975b6e4a-kube-api-access-dd7dl\") pod \"placement-5f39-account-create-update-wn6v7\" (UID: \"ef4d4490-6b6b-406b-b626-f135975b6e4a\") " pod="openstack/placement-5f39-account-create-update-wn6v7" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.326124 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5f39-account-create-update-wn6v7" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.366595 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.367812 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.528259 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-3b84-account-create-update-wz5gq"] Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.604382 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-r7k7z"] Nov 26 05:42:36 crc kubenswrapper[4871]: W1126 05:42:36.609830 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9f197c15_87fa_40c6_9cbf_200c9746aba7.slice/crio-85837c6b33a2d20d89d5b36d5167e3c47034cf8877207f0632623e6974ac9948 WatchSource:0}: Error finding container 85837c6b33a2d20d89d5b36d5167e3c47034cf8877207f0632623e6974ac9948: Status 404 returned error can't find the container with id 85837c6b33a2d20d89d5b36d5167e3c47034cf8877207f0632623e6974ac9948 Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.725043 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-btz92"] Nov 26 05:42:36 crc kubenswrapper[4871]: W1126 05:42:36.728259 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8e1692f_fcdb_4735_b4b4_904fb9c9da85.slice/crio-9db1842f0798cb319443075465b8bef405cc52f6740b9b9d19c2cae6bb153802 WatchSource:0}: Error finding container 9db1842f0798cb319443075465b8bef405cc52f6740b9b9d19c2cae6bb153802: Status 404 returned error can't find the container with id 9db1842f0798cb319443075465b8bef405cc52f6740b9b9d19c2cae6bb153802 Nov 26 05:42:36 crc kubenswrapper[4871]: I1126 05:42:36.800255 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5f39-account-create-update-wn6v7"] Nov 26 05:42:36 crc kubenswrapper[4871]: W1126 05:42:36.809704 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podef4d4490_6b6b_406b_b626_f135975b6e4a.slice/crio-18bde9963556c8a3c61614e973b7fd2cb45f2d4a7fc161f781f5c36a63f865fc WatchSource:0}: Error finding container 18bde9963556c8a3c61614e973b7fd2cb45f2d4a7fc161f781f5c36a63f865fc: Status 404 returned error can't find the container with id 18bde9963556c8a3c61614e973b7fd2cb45f2d4a7fc161f781f5c36a63f865fc Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.305942 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-btz92" event={"ID":"f8e1692f-fcdb-4735-b4b4-904fb9c9da85","Type":"ContainerStarted","Data":"9db1842f0798cb319443075465b8bef405cc52f6740b9b9d19c2cae6bb153802"} Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.307618 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3b84-account-create-update-wz5gq" event={"ID":"a56ec863-f1f4-48f3-b167-54ec413401f1","Type":"ContainerStarted","Data":"2049fa3b7ec26b08bd6273e9eff1e232b177aa480e00bea3ff6afbba959a4586"} Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.309468 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f39-account-create-update-wn6v7" 
event={"ID":"ef4d4490-6b6b-406b-b626-f135975b6e4a","Type":"ContainerStarted","Data":"18bde9963556c8a3c61614e973b7fd2cb45f2d4a7fc161f781f5c36a63f865fc"} Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.310969 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r7k7z" event={"ID":"9f197c15-87fa-40c6-9cbf-200c9746aba7","Type":"ContainerStarted","Data":"85837c6b33a2d20d89d5b36d5167e3c47034cf8877207f0632623e6974ac9948"} Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.686379 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-create-pnbw4"] Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.687654 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-pnbw4" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.704672 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-pnbw4"] Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.748094 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76f9c4c8bc-56mq5"] Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.749362 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.769427 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36652a1c-6392-4693-86fd-2ec4c2955cd6-operator-scripts\") pod \"watcher-db-create-pnbw4\" (UID: \"36652a1c-6392-4693-86fd-2ec4c2955cd6\") " pod="openstack/watcher-db-create-pnbw4" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.769524 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2wm2\" (UniqueName: \"kubernetes.io/projected/36652a1c-6392-4693-86fd-2ec4c2955cd6-kube-api-access-j2wm2\") pod \"watcher-db-create-pnbw4\" (UID: \"36652a1c-6392-4693-86fd-2ec4c2955cd6\") " pod="openstack/watcher-db-create-pnbw4" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.805604 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76f9c4c8bc-56mq5"] Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.838629 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-4e26-account-create-update-6rmj5"] Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.839659 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-4e26-account-create-update-6rmj5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.841593 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.866097 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-4e26-account-create-update-6rmj5"] Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.870488 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-config\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.872800 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36652a1c-6392-4693-86fd-2ec4c2955cd6-operator-scripts\") pod \"watcher-db-create-pnbw4\" (UID: \"36652a1c-6392-4693-86fd-2ec4c2955cd6\") " pod="openstack/watcher-db-create-pnbw4" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.872855 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5z2z\" (UniqueName: \"kubernetes.io/projected/468d6a43-d467-4606-b7fd-a39e765a72e1-kube-api-access-x5z2z\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.872899 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-dns-svc\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.872942 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2wm2\" (UniqueName: \"kubernetes.io/projected/36652a1c-6392-4693-86fd-2ec4c2955cd6-kube-api-access-j2wm2\") pod \"watcher-db-create-pnbw4\" (UID: \"36652a1c-6392-4693-86fd-2ec4c2955cd6\") " pod="openstack/watcher-db-create-pnbw4" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.873180 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-nb\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.873236 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-sb\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.873556 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36652a1c-6392-4693-86fd-2ec4c2955cd6-operator-scripts\") pod \"watcher-db-create-pnbw4\" (UID: \"36652a1c-6392-4693-86fd-2ec4c2955cd6\") " 
pod="openstack/watcher-db-create-pnbw4" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.897378 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2wm2\" (UniqueName: \"kubernetes.io/projected/36652a1c-6392-4693-86fd-2ec4c2955cd6-kube-api-access-j2wm2\") pod \"watcher-db-create-pnbw4\" (UID: \"36652a1c-6392-4693-86fd-2ec4c2955cd6\") " pod="openstack/watcher-db-create-pnbw4" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.974786 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-nb\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.974826 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-sb\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.974863 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fmsg\" (UniqueName: \"kubernetes.io/projected/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-kube-api-access-7fmsg\") pod \"watcher-4e26-account-create-update-6rmj5\" (UID: \"54b1fab8-ed9d-41f9-bd32-504ea14de7f7\") " pod="openstack/watcher-4e26-account-create-update-6rmj5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.974901 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-config\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.974930 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-operator-scripts\") pod \"watcher-4e26-account-create-update-6rmj5\" (UID: \"54b1fab8-ed9d-41f9-bd32-504ea14de7f7\") " pod="openstack/watcher-4e26-account-create-update-6rmj5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.974962 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5z2z\" (UniqueName: \"kubernetes.io/projected/468d6a43-d467-4606-b7fd-a39e765a72e1-kube-api-access-x5z2z\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.974990 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-dns-svc\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.975904 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-dns-svc\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: 
\"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.975977 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-sb\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.976157 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-config\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.976435 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-nb\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:37 crc kubenswrapper[4871]: I1126 05:42:37.993487 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5z2z\" (UniqueName: \"kubernetes.io/projected/468d6a43-d467-4606-b7fd-a39e765a72e1-kube-api-access-x5z2z\") pod \"dnsmasq-dns-76f9c4c8bc-56mq5\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.019772 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-pnbw4" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.076612 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fmsg\" (UniqueName: \"kubernetes.io/projected/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-kube-api-access-7fmsg\") pod \"watcher-4e26-account-create-update-6rmj5\" (UID: \"54b1fab8-ed9d-41f9-bd32-504ea14de7f7\") " pod="openstack/watcher-4e26-account-create-update-6rmj5" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.076692 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-operator-scripts\") pod \"watcher-4e26-account-create-update-6rmj5\" (UID: \"54b1fab8-ed9d-41f9-bd32-504ea14de7f7\") " pod="openstack/watcher-4e26-account-create-update-6rmj5" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.077422 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-operator-scripts\") pod \"watcher-4e26-account-create-update-6rmj5\" (UID: \"54b1fab8-ed9d-41f9-bd32-504ea14de7f7\") " pod="openstack/watcher-4e26-account-create-update-6rmj5" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.096259 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fmsg\" (UniqueName: \"kubernetes.io/projected/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-kube-api-access-7fmsg\") pod \"watcher-4e26-account-create-update-6rmj5\" (UID: \"54b1fab8-ed9d-41f9-bd32-504ea14de7f7\") " pod="openstack/watcher-4e26-account-create-update-6rmj5" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.102106 4871 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.176960 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-4e26-account-create-update-6rmj5" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.455053 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-pnbw4"] Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.580550 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76f9c4c8bc-56mq5"] Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.744296 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-4e26-account-create-update-6rmj5"] Nov 26 05:42:38 crc kubenswrapper[4871]: W1126 05:42:38.750536 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54b1fab8_ed9d_41f9_bd32_504ea14de7f7.slice/crio-e51d01f7dbb205a009f25015c92e8073a09f74c65d92010e8cc8f86f48f29f73 WatchSource:0}: Error finding container e51d01f7dbb205a009f25015c92e8073a09f74c65d92010e8cc8f86f48f29f73: Status 404 returned error can't find the container with id e51d01f7dbb205a009f25015c92e8073a09f74c65d92010e8cc8f86f48f29f73 Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.841392 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.855094 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.859030 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.859303 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.859364 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.859647 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-4jdrz" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.880314 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.901558 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkl86\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-kube-api-access-nkl86\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.901652 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-cache\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.901710 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " 
pod="openstack/swift-storage-0" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.901777 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:38 crc kubenswrapper[4871]: I1126 05:42:38.901815 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-lock\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.003258 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.003330 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-lock\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.003395 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkl86\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-kube-api-access-nkl86\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.003480 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-cache\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: E1126 05:42:39.003494 4871 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 05:42:39 crc kubenswrapper[4871]: E1126 05:42:39.003546 4871 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.003557 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: E1126 05:42:39.003614 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift podName:c927c3b8-9d32-4cbb-97cc-d834a6e225c1 nodeName:}" failed. No retries permitted until 2025-11-26 05:42:39.503589696 +0000 UTC m=+1017.686641302 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift") pod "swift-storage-0" (UID: "c927c3b8-9d32-4cbb-97cc-d834a6e225c1") : configmap "swift-ring-files" not found Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.003794 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-lock\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.003997 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.004287 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-cache\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.030004 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkl86\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-kube-api-access-nkl86\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.032816 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.341094 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f39-account-create-update-wn6v7" event={"ID":"ef4d4490-6b6b-406b-b626-f135975b6e4a","Type":"ContainerStarted","Data":"5fed9628adca9f4839b5ff67ff420d33c13b1d950ef598988443b8372f13d1ba"} Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.342680 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-4e26-account-create-update-6rmj5" event={"ID":"54b1fab8-ed9d-41f9-bd32-504ea14de7f7","Type":"ContainerStarted","Data":"e51d01f7dbb205a009f25015c92e8073a09f74c65d92010e8cc8f86f48f29f73"} Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.344997 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r7k7z" event={"ID":"9f197c15-87fa-40c6-9cbf-200c9746aba7","Type":"ContainerStarted","Data":"df80e4c8d168c0118fa7f1f905cb66cd1e44ca021cf15aedb5e51a40ea0d0d62"} Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.347222 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-btz92" event={"ID":"f8e1692f-fcdb-4735-b4b4-904fb9c9da85","Type":"ContainerStarted","Data":"083f93c8a643487d9983932214b1df84f495dca5380e7d14a28a636a6d29a2e1"} Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.349258 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3b84-account-create-update-wz5gq" 
event={"ID":"a56ec863-f1f4-48f3-b167-54ec413401f1","Type":"ContainerStarted","Data":"d3691846afd5a7c4106cdf341d2421dfacf83c13fa69bcb02c8768296823c6db"} Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.350334 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" event={"ID":"468d6a43-d467-4606-b7fd-a39e765a72e1","Type":"ContainerStarted","Data":"c0ee53108ef1efee48b0bfee0b628bb492b2f4bb91c3f56fb02e1e9617ef9346"} Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.351867 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-pnbw4" event={"ID":"36652a1c-6392-4693-86fd-2ec4c2955cd6","Type":"ContainerStarted","Data":"cb9c5e4989892cabc82eb43569b3b3448075f546e1d52c806e3d67e33c5bde9d"} Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.351896 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-pnbw4" event={"ID":"36652a1c-6392-4693-86fd-2ec4c2955cd6","Type":"ContainerStarted","Data":"9954e5965bb4411dcf7bc2b5f688328dcacb1f55dd3bbf8972267fc7a2187003"} Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.474852 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.510483 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:39 crc kubenswrapper[4871]: E1126 05:42:39.510656 4871 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 05:42:39 crc kubenswrapper[4871]: E1126 05:42:39.510707 4871 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 05:42:39 crc kubenswrapper[4871]: E1126 05:42:39.510752 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift podName:c927c3b8-9d32-4cbb-97cc-d834a6e225c1 nodeName:}" failed. No retries permitted until 2025-11-26 05:42:40.510733988 +0000 UTC m=+1018.693785574 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift") pod "swift-storage-0" (UID: "c927c3b8-9d32-4cbb-97cc-d834a6e225c1") : configmap "swift-ring-files" not found Nov 26 05:42:39 crc kubenswrapper[4871]: I1126 05:42:39.606423 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.360563 4871 generic.go:334] "Generic (PLEG): container finished" podID="f8e1692f-fcdb-4735-b4b4-904fb9c9da85" containerID="083f93c8a643487d9983932214b1df84f495dca5380e7d14a28a636a6d29a2e1" exitCode=0 Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.361004 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-btz92" event={"ID":"f8e1692f-fcdb-4735-b4b4-904fb9c9da85","Type":"ContainerDied","Data":"083f93c8a643487d9983932214b1df84f495dca5380e7d14a28a636a6d29a2e1"} Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.362893 4871 generic.go:334] "Generic (PLEG): container finished" podID="a56ec863-f1f4-48f3-b167-54ec413401f1" containerID="d3691846afd5a7c4106cdf341d2421dfacf83c13fa69bcb02c8768296823c6db" exitCode=0 Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.362943 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3b84-account-create-update-wz5gq" event={"ID":"a56ec863-f1f4-48f3-b167-54ec413401f1","Type":"ContainerDied","Data":"d3691846afd5a7c4106cdf341d2421dfacf83c13fa69bcb02c8768296823c6db"} Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.365225 4871 generic.go:334] "Generic (PLEG): container finished" podID="468d6a43-d467-4606-b7fd-a39e765a72e1" containerID="6243b584b51b95380c8f8cc43682f689e5b3c304b5512dd3b508b0509d7265bd" exitCode=0 Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.365270 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" event={"ID":"468d6a43-d467-4606-b7fd-a39e765a72e1","Type":"ContainerDied","Data":"6243b584b51b95380c8f8cc43682f689e5b3c304b5512dd3b508b0509d7265bd"} Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.367359 4871 generic.go:334] "Generic (PLEG): container finished" podID="ef4d4490-6b6b-406b-b626-f135975b6e4a" containerID="5fed9628adca9f4839b5ff67ff420d33c13b1d950ef598988443b8372f13d1ba" exitCode=0 Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.367407 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f39-account-create-update-wn6v7" event={"ID":"ef4d4490-6b6b-406b-b626-f135975b6e4a","Type":"ContainerDied","Data":"5fed9628adca9f4839b5ff67ff420d33c13b1d950ef598988443b8372f13d1ba"} Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.371070 4871 generic.go:334] "Generic (PLEG): container finished" podID="54b1fab8-ed9d-41f9-bd32-504ea14de7f7" containerID="618317baff62f8b0e781d4e784538e3afff587433bb9d703ad6e56c94bd46778" exitCode=0 Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.371157 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-4e26-account-create-update-6rmj5" event={"ID":"54b1fab8-ed9d-41f9-bd32-504ea14de7f7","Type":"ContainerDied","Data":"618317baff62f8b0e781d4e784538e3afff587433bb9d703ad6e56c94bd46778"} Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.372663 4871 generic.go:334] "Generic (PLEG): container finished" podID="36652a1c-6392-4693-86fd-2ec4c2955cd6" containerID="cb9c5e4989892cabc82eb43569b3b3448075f546e1d52c806e3d67e33c5bde9d" 
exitCode=0 Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.372724 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-pnbw4" event={"ID":"36652a1c-6392-4693-86fd-2ec4c2955cd6","Type":"ContainerDied","Data":"cb9c5e4989892cabc82eb43569b3b3448075f546e1d52c806e3d67e33c5bde9d"} Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.383412 4871 generic.go:334] "Generic (PLEG): container finished" podID="9f197c15-87fa-40c6-9cbf-200c9746aba7" containerID="df80e4c8d168c0118fa7f1f905cb66cd1e44ca021cf15aedb5e51a40ea0d0d62" exitCode=0 Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.384396 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r7k7z" event={"ID":"9f197c15-87fa-40c6-9cbf-200c9746aba7","Type":"ContainerDied","Data":"df80e4c8d168c0118fa7f1f905cb66cd1e44ca021cf15aedb5e51a40ea0d0d62"} Nov 26 05:42:40 crc kubenswrapper[4871]: I1126 05:42:40.527027 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:40 crc kubenswrapper[4871]: E1126 05:42:40.527188 4871 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 05:42:40 crc kubenswrapper[4871]: E1126 05:42:40.527204 4871 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 05:42:40 crc kubenswrapper[4871]: E1126 05:42:40.527244 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift podName:c927c3b8-9d32-4cbb-97cc-d834a6e225c1 nodeName:}" failed. No retries permitted until 2025-11-26 05:42:42.527231327 +0000 UTC m=+1020.710282913 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift") pod "swift-storage-0" (UID: "c927c3b8-9d32-4cbb-97cc-d834a6e225c1") : configmap "swift-ring-files" not found Nov 26 05:42:41 crc kubenswrapper[4871]: I1126 05:42:41.393867 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" event={"ID":"468d6a43-d467-4606-b7fd-a39e765a72e1","Type":"ContainerStarted","Data":"3f21a7f8d9278353eaf1d8f49da2f1913305236e379a2e359f7aa9112776bc88"} Nov 26 05:42:41 crc kubenswrapper[4871]: I1126 05:42:41.417350 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" podStartSLOduration=4.4173264549999995 podStartE2EDuration="4.417326455s" podCreationTimestamp="2025-11-26 05:42:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:42:41.410573659 +0000 UTC m=+1019.593625255" watchObservedRunningTime="2025-11-26 05:42:41.417326455 +0000 UTC m=+1019.600378051" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.204195 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-btz92" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.211264 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-r7k7z" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.263037 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-operator-scripts\") pod \"f8e1692f-fcdb-4735-b4b4-904fb9c9da85\" (UID: \"f8e1692f-fcdb-4735-b4b4-904fb9c9da85\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.263102 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-868zm\" (UniqueName: \"kubernetes.io/projected/9f197c15-87fa-40c6-9cbf-200c9746aba7-kube-api-access-868zm\") pod \"9f197c15-87fa-40c6-9cbf-200c9746aba7\" (UID: \"9f197c15-87fa-40c6-9cbf-200c9746aba7\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.263321 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f197c15-87fa-40c6-9cbf-200c9746aba7-operator-scripts\") pod \"9f197c15-87fa-40c6-9cbf-200c9746aba7\" (UID: \"9f197c15-87fa-40c6-9cbf-200c9746aba7\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.263342 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb97w\" (UniqueName: \"kubernetes.io/projected/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-kube-api-access-sb97w\") pod \"f8e1692f-fcdb-4735-b4b4-904fb9c9da85\" (UID: \"f8e1692f-fcdb-4735-b4b4-904fb9c9da85\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.263655 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f197c15-87fa-40c6-9cbf-200c9746aba7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9f197c15-87fa-40c6-9cbf-200c9746aba7" (UID: "9f197c15-87fa-40c6-9cbf-200c9746aba7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.264266 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f8e1692f-fcdb-4735-b4b4-904fb9c9da85" (UID: "f8e1692f-fcdb-4735-b4b4-904fb9c9da85"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.269701 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-pnbw4" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.269764 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-kube-api-access-sb97w" (OuterVolumeSpecName: "kube-api-access-sb97w") pod "f8e1692f-fcdb-4735-b4b4-904fb9c9da85" (UID: "f8e1692f-fcdb-4735-b4b4-904fb9c9da85"). InnerVolumeSpecName "kube-api-access-sb97w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.274157 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f197c15-87fa-40c6-9cbf-200c9746aba7-kube-api-access-868zm" (OuterVolumeSpecName: "kube-api-access-868zm") pod "9f197c15-87fa-40c6-9cbf-200c9746aba7" (UID: "9f197c15-87fa-40c6-9cbf-200c9746aba7"). InnerVolumeSpecName "kube-api-access-868zm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.328725 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5f39-account-create-update-wn6v7" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.336224 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3b84-account-create-update-wz5gq" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.350597 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-4e26-account-create-update-6rmj5" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.365503 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dd7dl\" (UniqueName: \"kubernetes.io/projected/ef4d4490-6b6b-406b-b626-f135975b6e4a-kube-api-access-dd7dl\") pod \"ef4d4490-6b6b-406b-b626-f135975b6e4a\" (UID: \"ef4d4490-6b6b-406b-b626-f135975b6e4a\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.365734 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36652a1c-6392-4693-86fd-2ec4c2955cd6-operator-scripts\") pod \"36652a1c-6392-4693-86fd-2ec4c2955cd6\" (UID: \"36652a1c-6392-4693-86fd-2ec4c2955cd6\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.365766 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef4d4490-6b6b-406b-b626-f135975b6e4a-operator-scripts\") pod \"ef4d4490-6b6b-406b-b626-f135975b6e4a\" (UID: \"ef4d4490-6b6b-406b-b626-f135975b6e4a\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.365797 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a56ec863-f1f4-48f3-b167-54ec413401f1-operator-scripts\") pod \"a56ec863-f1f4-48f3-b167-54ec413401f1\" (UID: \"a56ec863-f1f4-48f3-b167-54ec413401f1\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.365865 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2wm2\" (UniqueName: \"kubernetes.io/projected/36652a1c-6392-4693-86fd-2ec4c2955cd6-kube-api-access-j2wm2\") pod \"36652a1c-6392-4693-86fd-2ec4c2955cd6\" (UID: \"36652a1c-6392-4693-86fd-2ec4c2955cd6\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.365910 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4p9wz\" (UniqueName: \"kubernetes.io/projected/a56ec863-f1f4-48f3-b167-54ec413401f1-kube-api-access-4p9wz\") pod \"a56ec863-f1f4-48f3-b167-54ec413401f1\" (UID: \"a56ec863-f1f4-48f3-b167-54ec413401f1\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.366351 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.366367 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-868zm\" (UniqueName: \"kubernetes.io/projected/9f197c15-87fa-40c6-9cbf-200c9746aba7-kube-api-access-868zm\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.366381 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/9f197c15-87fa-40c6-9cbf-200c9746aba7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.366393 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb97w\" (UniqueName: \"kubernetes.io/projected/f8e1692f-fcdb-4735-b4b4-904fb9c9da85-kube-api-access-sb97w\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.367565 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef4d4490-6b6b-406b-b626-f135975b6e4a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ef4d4490-6b6b-406b-b626-f135975b6e4a" (UID: "ef4d4490-6b6b-406b-b626-f135975b6e4a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.369281 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36652a1c-6392-4693-86fd-2ec4c2955cd6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "36652a1c-6392-4693-86fd-2ec4c2955cd6" (UID: "36652a1c-6392-4693-86fd-2ec4c2955cd6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.369989 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a56ec863-f1f4-48f3-b167-54ec413401f1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a56ec863-f1f4-48f3-b167-54ec413401f1" (UID: "a56ec863-f1f4-48f3-b167-54ec413401f1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.374158 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a56ec863-f1f4-48f3-b167-54ec413401f1-kube-api-access-4p9wz" (OuterVolumeSpecName: "kube-api-access-4p9wz") pod "a56ec863-f1f4-48f3-b167-54ec413401f1" (UID: "a56ec863-f1f4-48f3-b167-54ec413401f1"). InnerVolumeSpecName "kube-api-access-4p9wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.374519 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef4d4490-6b6b-406b-b626-f135975b6e4a-kube-api-access-dd7dl" (OuterVolumeSpecName: "kube-api-access-dd7dl") pod "ef4d4490-6b6b-406b-b626-f135975b6e4a" (UID: "ef4d4490-6b6b-406b-b626-f135975b6e4a"). InnerVolumeSpecName "kube-api-access-dd7dl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.375335 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36652a1c-6392-4693-86fd-2ec4c2955cd6-kube-api-access-j2wm2" (OuterVolumeSpecName: "kube-api-access-j2wm2") pod "36652a1c-6392-4693-86fd-2ec4c2955cd6" (UID: "36652a1c-6392-4693-86fd-2ec4c2955cd6"). InnerVolumeSpecName "kube-api-access-j2wm2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.376838 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.403650 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3b84-account-create-update-wz5gq" event={"ID":"a56ec863-f1f4-48f3-b167-54ec413401f1","Type":"ContainerDied","Data":"2049fa3b7ec26b08bd6273e9eff1e232b177aa480e00bea3ff6afbba959a4586"} Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.403694 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2049fa3b7ec26b08bd6273e9eff1e232b177aa480e00bea3ff6afbba959a4586" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.403765 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3b84-account-create-update-wz5gq" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.419506 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-4e26-account-create-update-6rmj5" event={"ID":"54b1fab8-ed9d-41f9-bd32-504ea14de7f7","Type":"ContainerDied","Data":"e51d01f7dbb205a009f25015c92e8073a09f74c65d92010e8cc8f86f48f29f73"} Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.419564 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e51d01f7dbb205a009f25015c92e8073a09f74c65d92010e8cc8f86f48f29f73" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.419631 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-4e26-account-create-update-6rmj5" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.426424 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-pnbw4" event={"ID":"36652a1c-6392-4693-86fd-2ec4c2955cd6","Type":"ContainerDied","Data":"9954e5965bb4411dcf7bc2b5f688328dcacb1f55dd3bbf8972267fc7a2187003"} Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.426470 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9954e5965bb4411dcf7bc2b5f688328dcacb1f55dd3bbf8972267fc7a2187003" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.426569 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-pnbw4" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.435953 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f39-account-create-update-wn6v7" event={"ID":"ef4d4490-6b6b-406b-b626-f135975b6e4a","Type":"ContainerDied","Data":"18bde9963556c8a3c61614e973b7fd2cb45f2d4a7fc161f781f5c36a63f865fc"} Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.435996 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18bde9963556c8a3c61614e973b7fd2cb45f2d4a7fc161f781f5c36a63f865fc" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.436058 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5f39-account-create-update-wn6v7" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.443339 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-r7k7z" event={"ID":"9f197c15-87fa-40c6-9cbf-200c9746aba7","Type":"ContainerDied","Data":"85837c6b33a2d20d89d5b36d5167e3c47034cf8877207f0632623e6974ac9948"} Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.443377 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85837c6b33a2d20d89d5b36d5167e3c47034cf8877207f0632623e6974ac9948" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.444374 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-r7k7z" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.447218 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"25606939-d595-4bfc-aead-c40883fdae31","Type":"ContainerStarted","Data":"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50"} Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.452671 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-btz92" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.453950 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-btz92" event={"ID":"f8e1692f-fcdb-4735-b4b4-904fb9c9da85","Type":"ContainerDied","Data":"9db1842f0798cb319443075465b8bef405cc52f6740b9b9d19c2cae6bb153802"} Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.454218 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9db1842f0798cb319443075465b8bef405cc52f6740b9b9d19c2cae6bb153802" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.454339 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.473191 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fmsg\" (UniqueName: \"kubernetes.io/projected/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-kube-api-access-7fmsg\") pod \"54b1fab8-ed9d-41f9-bd32-504ea14de7f7\" (UID: \"54b1fab8-ed9d-41f9-bd32-504ea14de7f7\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.473273 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-operator-scripts\") pod \"54b1fab8-ed9d-41f9-bd32-504ea14de7f7\" (UID: \"54b1fab8-ed9d-41f9-bd32-504ea14de7f7\") " Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.473822 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2wm2\" (UniqueName: \"kubernetes.io/projected/36652a1c-6392-4693-86fd-2ec4c2955cd6-kube-api-access-j2wm2\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.473845 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4p9wz\" (UniqueName: \"kubernetes.io/projected/a56ec863-f1f4-48f3-b167-54ec413401f1-kube-api-access-4p9wz\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.473855 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dd7dl\" (UniqueName: \"kubernetes.io/projected/ef4d4490-6b6b-406b-b626-f135975b6e4a-kube-api-access-dd7dl\") on node 
\"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.473867 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36652a1c-6392-4693-86fd-2ec4c2955cd6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.473876 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef4d4490-6b6b-406b-b626-f135975b6e4a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.473885 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a56ec863-f1f4-48f3-b167-54ec413401f1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.474440 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=3.215623017 podStartE2EDuration="1m5.474424981s" podCreationTimestamp="2025-11-26 05:41:37 +0000 UTC" firstStartedPulling="2025-11-26 05:41:39.829591656 +0000 UTC m=+958.012643242" lastFinishedPulling="2025-11-26 05:42:42.08839362 +0000 UTC m=+1020.271445206" observedRunningTime="2025-11-26 05:42:42.473127779 +0000 UTC m=+1020.656179365" watchObservedRunningTime="2025-11-26 05:42:42.474424981 +0000 UTC m=+1020.657476567" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.474737 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "54b1fab8-ed9d-41f9-bd32-504ea14de7f7" (UID: "54b1fab8-ed9d-41f9-bd32-504ea14de7f7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.480588 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-kube-api-access-7fmsg" (OuterVolumeSpecName: "kube-api-access-7fmsg") pod "54b1fab8-ed9d-41f9-bd32-504ea14de7f7" (UID: "54b1fab8-ed9d-41f9-bd32-504ea14de7f7"). InnerVolumeSpecName "kube-api-access-7fmsg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.531544 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 26 05:42:42 crc kubenswrapper[4871]: E1126 05:42:42.531788 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef4d4490-6b6b-406b-b626-f135975b6e4a" containerName="mariadb-account-create-update" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.531799 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef4d4490-6b6b-406b-b626-f135975b6e4a" containerName="mariadb-account-create-update" Nov 26 05:42:42 crc kubenswrapper[4871]: E1126 05:42:42.531810 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36652a1c-6392-4693-86fd-2ec4c2955cd6" containerName="mariadb-database-create" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.531816 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="36652a1c-6392-4693-86fd-2ec4c2955cd6" containerName="mariadb-database-create" Nov 26 05:42:42 crc kubenswrapper[4871]: E1126 05:42:42.531837 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a56ec863-f1f4-48f3-b167-54ec413401f1" containerName="mariadb-account-create-update" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.531844 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a56ec863-f1f4-48f3-b167-54ec413401f1" containerName="mariadb-account-create-update" Nov 26 05:42:42 crc kubenswrapper[4871]: E1126 05:42:42.531854 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f197c15-87fa-40c6-9cbf-200c9746aba7" containerName="mariadb-database-create" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.531860 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f197c15-87fa-40c6-9cbf-200c9746aba7" containerName="mariadb-database-create" Nov 26 05:42:42 crc kubenswrapper[4871]: E1126 05:42:42.531875 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8e1692f-fcdb-4735-b4b4-904fb9c9da85" containerName="mariadb-database-create" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.531880 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8e1692f-fcdb-4735-b4b4-904fb9c9da85" containerName="mariadb-database-create" Nov 26 05:42:42 crc kubenswrapper[4871]: E1126 05:42:42.531893 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54b1fab8-ed9d-41f9-bd32-504ea14de7f7" containerName="mariadb-account-create-update" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.531899 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="54b1fab8-ed9d-41f9-bd32-504ea14de7f7" containerName="mariadb-account-create-update" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.532040 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a56ec863-f1f4-48f3-b167-54ec413401f1" containerName="mariadb-account-create-update" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.532058 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="36652a1c-6392-4693-86fd-2ec4c2955cd6" containerName="mariadb-database-create" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.532077 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f197c15-87fa-40c6-9cbf-200c9746aba7" containerName="mariadb-database-create" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.532093 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8e1692f-fcdb-4735-b4b4-904fb9c9da85" 
containerName="mariadb-database-create" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.532104 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="54b1fab8-ed9d-41f9-bd32-504ea14de7f7" containerName="mariadb-account-create-update" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.532119 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef4d4490-6b6b-406b-b626-f135975b6e4a" containerName="mariadb-account-create-update" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.533454 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.537003 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.537145 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.542838 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.543047 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-bjbh8" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.544100 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.575233 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/251bc2ce-32a0-4d94-843b-f7ac83e601f4-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.575307 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/251bc2ce-32a0-4d94-843b-f7ac83e601f4-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.575437 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/251bc2ce-32a0-4d94-843b-f7ac83e601f4-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.575495 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.575560 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pc8vw\" (UniqueName: \"kubernetes.io/projected/251bc2ce-32a0-4d94-843b-f7ac83e601f4-kube-api-access-pc8vw\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.575581 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/251bc2ce-32a0-4d94-843b-f7ac83e601f4-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: E1126 05:42:42.575695 4871 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 05:42:42 crc kubenswrapper[4871]: E1126 05:42:42.575724 4871 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 05:42:42 crc kubenswrapper[4871]: E1126 05:42:42.575767 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift podName:c927c3b8-9d32-4cbb-97cc-d834a6e225c1 nodeName:}" failed. No retries permitted until 2025-11-26 05:42:46.575749557 +0000 UTC m=+1024.758801143 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift") pod "swift-storage-0" (UID: "c927c3b8-9d32-4cbb-97cc-d834a6e225c1") : configmap "swift-ring-files" not found Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.575820 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/251bc2ce-32a0-4d94-843b-f7ac83e601f4-scripts\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.575885 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/251bc2ce-32a0-4d94-843b-f7ac83e601f4-config\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.576096 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fmsg\" (UniqueName: \"kubernetes.io/projected/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-kube-api-access-7fmsg\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.576236 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/54b1fab8-ed9d-41f9-bd32-504ea14de7f7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.678005 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/251bc2ce-32a0-4d94-843b-f7ac83e601f4-scripts\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.678303 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/251bc2ce-32a0-4d94-843b-f7ac83e601f4-config\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.678417 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/251bc2ce-32a0-4d94-843b-f7ac83e601f4-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc 
kubenswrapper[4871]: I1126 05:42:42.678550 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/251bc2ce-32a0-4d94-843b-f7ac83e601f4-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.679522 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/251bc2ce-32a0-4d94-843b-f7ac83e601f4-config\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.679570 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/251bc2ce-32a0-4d94-843b-f7ac83e601f4-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.679783 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc8vw\" (UniqueName: \"kubernetes.io/projected/251bc2ce-32a0-4d94-843b-f7ac83e601f4-kube-api-access-pc8vw\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.679848 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/251bc2ce-32a0-4d94-843b-f7ac83e601f4-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.678863 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/251bc2ce-32a0-4d94-843b-f7ac83e601f4-scripts\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.680439 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/251bc2ce-32a0-4d94-843b-f7ac83e601f4-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.683211 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/251bc2ce-32a0-4d94-843b-f7ac83e601f4-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.684010 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/251bc2ce-32a0-4d94-843b-f7ac83e601f4-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.691494 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/251bc2ce-32a0-4d94-843b-f7ac83e601f4-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc 
kubenswrapper[4871]: I1126 05:42:42.695896 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc8vw\" (UniqueName: \"kubernetes.io/projected/251bc2ce-32a0-4d94-843b-f7ac83e601f4-kube-api-access-pc8vw\") pod \"ovn-northd-0\" (UID: \"251bc2ce-32a0-4d94-843b-f7ac83e601f4\") " pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.807787 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-jh4db"] Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.809168 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.811279 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.811562 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.819992 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-jh4db"] Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.825819 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.843343 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-qtnpf"] Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.846066 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.862024 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.885471 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-scripts\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.885541 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-swiftconf\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.885577 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-dispersionconf\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.885596 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-combined-ca-bundle\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.886120 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/939311f9-c955-44c9-b1fd-e1526eb4da20-etc-swift\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.886171 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-swiftconf\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.886209 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-etc-swift\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.886232 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-combined-ca-bundle\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.886253 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-dispersionconf\") pod \"swift-ring-rebalance-qtnpf\" (UID: 
\"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.886276 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl2jn\" (UniqueName: \"kubernetes.io/projected/939311f9-c955-44c9-b1fd-e1526eb4da20-kube-api-access-pl2jn\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.886300 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbsc2\" (UniqueName: \"kubernetes.io/projected/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-kube-api-access-mbsc2\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.886360 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-scripts\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.886385 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-ring-data-devices\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.886414 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-ring-data-devices\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.887760 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-jh4db"] Nov 26 05:42:42 crc kubenswrapper[4871]: E1126 05:42:42.892363 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-pl2jn ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/swift-ring-rebalance-jh4db" podUID="939311f9-c955-44c9-b1fd-e1526eb4da20" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.899307 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-qtnpf"] Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.989700 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-etc-swift\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.989741 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-combined-ca-bundle\") pod \"swift-ring-rebalance-jh4db\" (UID: 
\"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.989758 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-dispersionconf\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.989781 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl2jn\" (UniqueName: \"kubernetes.io/projected/939311f9-c955-44c9-b1fd-e1526eb4da20-kube-api-access-pl2jn\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.989806 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbsc2\" (UniqueName: \"kubernetes.io/projected/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-kube-api-access-mbsc2\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.989868 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-scripts\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.989887 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-ring-data-devices\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.989913 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-ring-data-devices\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.989967 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-scripts\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.989989 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-swiftconf\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.990011 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-dispersionconf\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 
05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.990028 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-combined-ca-bundle\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.990057 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/939311f9-c955-44c9-b1fd-e1526eb4da20-etc-swift\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.990077 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-swiftconf\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.990170 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-etc-swift\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.990919 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-ring-data-devices\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.991504 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/939311f9-c955-44c9-b1fd-e1526eb4da20-etc-swift\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.991934 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-scripts\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.991955 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-ring-data-devices\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.992648 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-scripts\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.996752 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: 
\"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-dispersionconf\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:42 crc kubenswrapper[4871]: I1126 05:42:42.996828 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-dispersionconf\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.005342 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-swiftconf\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.005354 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-swiftconf\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.005545 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-combined-ca-bundle\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.008523 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-combined-ca-bundle\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.009354 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl2jn\" (UniqueName: \"kubernetes.io/projected/939311f9-c955-44c9-b1fd-e1526eb4da20-kube-api-access-pl2jn\") pod \"swift-ring-rebalance-jh4db\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.010203 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbsc2\" (UniqueName: \"kubernetes.io/projected/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-kube-api-access-mbsc2\") pod \"swift-ring-rebalance-qtnpf\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.144387 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 05:42:43 crc kubenswrapper[4871]: W1126 05:42:43.144996 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod251bc2ce_32a0_4d94_843b_f7ac83e601f4.slice/crio-a589d12b7420fb089f550509b87582c72618631b4dfdcca52c459ca3fc25253d WatchSource:0}: Error finding container a589d12b7420fb089f550509b87582c72618631b4dfdcca52c459ca3fc25253d: Status 404 returned error can't find the container with id a589d12b7420fb089f550509b87582c72618631b4dfdcca52c459ca3fc25253d Nov 26 
05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.181129 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-4jdrz" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.189660 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.478909 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"251bc2ce-32a0-4d94-843b-f7ac83e601f4","Type":"ContainerStarted","Data":"a589d12b7420fb089f550509b87582c72618631b4dfdcca52c459ca3fc25253d"} Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.479440 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.489566 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.499259 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/939311f9-c955-44c9-b1fd-e1526eb4da20-etc-swift\") pod \"939311f9-c955-44c9-b1fd-e1526eb4da20\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.499320 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pl2jn\" (UniqueName: \"kubernetes.io/projected/939311f9-c955-44c9-b1fd-e1526eb4da20-kube-api-access-pl2jn\") pod \"939311f9-c955-44c9-b1fd-e1526eb4da20\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.499409 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-swiftconf\") pod \"939311f9-c955-44c9-b1fd-e1526eb4da20\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.499472 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-ring-data-devices\") pod \"939311f9-c955-44c9-b1fd-e1526eb4da20\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.499560 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-scripts\") pod \"939311f9-c955-44c9-b1fd-e1526eb4da20\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.499613 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-dispersionconf\") pod \"939311f9-c955-44c9-b1fd-e1526eb4da20\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.499716 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-combined-ca-bundle\") pod \"939311f9-c955-44c9-b1fd-e1526eb4da20\" (UID: \"939311f9-c955-44c9-b1fd-e1526eb4da20\") " Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.500158 4871 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/939311f9-c955-44c9-b1fd-e1526eb4da20-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "939311f9-c955-44c9-b1fd-e1526eb4da20" (UID: "939311f9-c955-44c9-b1fd-e1526eb4da20"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.500488 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "939311f9-c955-44c9-b1fd-e1526eb4da20" (UID: "939311f9-c955-44c9-b1fd-e1526eb4da20"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.500612 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-scripts" (OuterVolumeSpecName: "scripts") pod "939311f9-c955-44c9-b1fd-e1526eb4da20" (UID: "939311f9-c955-44c9-b1fd-e1526eb4da20"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.502154 4871 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/939311f9-c955-44c9-b1fd-e1526eb4da20-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.502299 4871 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.502350 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/939311f9-c955-44c9-b1fd-e1526eb4da20-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.503837 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "939311f9-c955-44c9-b1fd-e1526eb4da20" (UID: "939311f9-c955-44c9-b1fd-e1526eb4da20"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.504663 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "939311f9-c955-44c9-b1fd-e1526eb4da20" (UID: "939311f9-c955-44c9-b1fd-e1526eb4da20"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.505819 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/939311f9-c955-44c9-b1fd-e1526eb4da20-kube-api-access-pl2jn" (OuterVolumeSpecName: "kube-api-access-pl2jn") pod "939311f9-c955-44c9-b1fd-e1526eb4da20" (UID: "939311f9-c955-44c9-b1fd-e1526eb4da20"). InnerVolumeSpecName "kube-api-access-pl2jn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.523116 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "939311f9-c955-44c9-b1fd-e1526eb4da20" (UID: "939311f9-c955-44c9-b1fd-e1526eb4da20"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.611473 4871 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.611526 4871 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.611556 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/939311f9-c955-44c9-b1fd-e1526eb4da20-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.611566 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pl2jn\" (UniqueName: \"kubernetes.io/projected/939311f9-c955-44c9-b1fd-e1526eb4da20-kube-api-access-pl2jn\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.647966 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-qtnpf"] Nov 26 05:42:43 crc kubenswrapper[4871]: I1126 05:42:43.941082 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:44 crc kubenswrapper[4871]: W1126 05:42:44.025553 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd92bcfc_31b0_4ec0_853f_0e2bbfc2c53d.slice/crio-0850eb4751d8434ea409f941aed0ea0ec0abbd1f4a701643db6a50dce437b513 WatchSource:0}: Error finding container 0850eb4751d8434ea409f941aed0ea0ec0abbd1f4a701643db6a50dce437b513: Status 404 returned error can't find the container with id 0850eb4751d8434ea409f941aed0ea0ec0abbd1f4a701643db6a50dce437b513 Nov 26 05:42:44 crc kubenswrapper[4871]: I1126 05:42:44.489117 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"251bc2ce-32a0-4d94-843b-f7ac83e601f4","Type":"ContainerStarted","Data":"25a60329e3e631d9badc2c9bd9f17152238257fef486a58416a57d33e7bf83ab"} Nov 26 05:42:44 crc kubenswrapper[4871]: I1126 05:42:44.489358 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 26 05:42:44 crc kubenswrapper[4871]: I1126 05:42:44.489367 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"251bc2ce-32a0-4d94-843b-f7ac83e601f4","Type":"ContainerStarted","Data":"2758d0bbbcae5cf2de92996c78c8059900c9a85e82309b662607f8c13ed11833"} Nov 26 05:42:44 crc kubenswrapper[4871]: I1126 05:42:44.490783 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qtnpf" event={"ID":"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d","Type":"ContainerStarted","Data":"0850eb4751d8434ea409f941aed0ea0ec0abbd1f4a701643db6a50dce437b513"} Nov 26 05:42:44 crc kubenswrapper[4871]: I1126 
05:42:44.490830 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-jh4db" Nov 26 05:42:44 crc kubenswrapper[4871]: I1126 05:42:44.507263 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=1.5971222360000001 podStartE2EDuration="2.507243874s" podCreationTimestamp="2025-11-26 05:42:42 +0000 UTC" firstStartedPulling="2025-11-26 05:42:43.151090783 +0000 UTC m=+1021.334142369" lastFinishedPulling="2025-11-26 05:42:44.061212421 +0000 UTC m=+1022.244264007" observedRunningTime="2025-11-26 05:42:44.505638265 +0000 UTC m=+1022.688689851" watchObservedRunningTime="2025-11-26 05:42:44.507243874 +0000 UTC m=+1022.690295460" Nov 26 05:42:44 crc kubenswrapper[4871]: I1126 05:42:44.588256 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-jh4db"] Nov 26 05:42:44 crc kubenswrapper[4871]: I1126 05:42:44.596932 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-jh4db"] Nov 26 05:42:46 crc kubenswrapper[4871]: I1126 05:42:46.520983 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="939311f9-c955-44c9-b1fd-e1526eb4da20" path="/var/lib/kubelet/pods/939311f9-c955-44c9-b1fd-e1526eb4da20/volumes" Nov 26 05:42:46 crc kubenswrapper[4871]: I1126 05:42:46.665466 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:46 crc kubenswrapper[4871]: E1126 05:42:46.665940 4871 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 05:42:46 crc kubenswrapper[4871]: E1126 05:42:46.666012 4871 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 05:42:46 crc kubenswrapper[4871]: E1126 05:42:46.666108 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift podName:c927c3b8-9d32-4cbb-97cc-d834a6e225c1 nodeName:}" failed. No retries permitted until 2025-11-26 05:42:54.666094331 +0000 UTC m=+1032.849145917 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift") pod "swift-storage-0" (UID: "c927c3b8-9d32-4cbb-97cc-d834a6e225c1") : configmap "swift-ring-files" not found Nov 26 05:42:46 crc kubenswrapper[4871]: I1126 05:42:46.729158 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-m255d" podUID="de8a947b-6c51-4c33-b221-ea16d851bafb" containerName="ovn-controller" probeResult="failure" output=< Nov 26 05:42:46 crc kubenswrapper[4871]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 05:42:46 crc kubenswrapper[4871]: > Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.104770 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.159585 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dbf544cc9-jksqz"] Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.159816 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" podUID="45374a1a-cee6-4d53-8643-e185d317425c" containerName="dnsmasq-dns" containerID="cri-o://49f07507965162e0c729182081ec6ddd43e73e586894f8531b291f7a63fea130" gracePeriod=10 Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.574777 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qtnpf" event={"ID":"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d","Type":"ContainerStarted","Data":"02262207fca42021f1b9f3242307b877a6704d999af11f48a032b093dd15ac64"} Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.577612 4871 generic.go:334] "Generic (PLEG): container finished" podID="45374a1a-cee6-4d53-8643-e185d317425c" containerID="49f07507965162e0c729182081ec6ddd43e73e586894f8531b291f7a63fea130" exitCode=0 Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.577645 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" event={"ID":"45374a1a-cee6-4d53-8643-e185d317425c","Type":"ContainerDied","Data":"49f07507965162e0c729182081ec6ddd43e73e586894f8531b291f7a63fea130"} Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.606889 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-qtnpf" podStartSLOduration=2.98676624 podStartE2EDuration="6.606868717s" podCreationTimestamp="2025-11-26 05:42:42 +0000 UTC" firstStartedPulling="2025-11-26 05:42:44.028259793 +0000 UTC m=+1022.211311379" lastFinishedPulling="2025-11-26 05:42:47.64836226 +0000 UTC m=+1025.831413856" observedRunningTime="2025-11-26 05:42:48.601221698 +0000 UTC m=+1026.784273284" watchObservedRunningTime="2025-11-26 05:42:48.606868717 +0000 UTC m=+1026.789920303" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.640344 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.712362 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-nb\") pod \"45374a1a-cee6-4d53-8643-e185d317425c\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.712449 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-sb\") pod \"45374a1a-cee6-4d53-8643-e185d317425c\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.712486 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-dns-svc\") pod \"45374a1a-cee6-4d53-8643-e185d317425c\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.712573 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-config\") pod \"45374a1a-cee6-4d53-8643-e185d317425c\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.712622 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87qhv\" (UniqueName: \"kubernetes.io/projected/45374a1a-cee6-4d53-8643-e185d317425c-kube-api-access-87qhv\") pod \"45374a1a-cee6-4d53-8643-e185d317425c\" (UID: \"45374a1a-cee6-4d53-8643-e185d317425c\") " Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.719684 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45374a1a-cee6-4d53-8643-e185d317425c-kube-api-access-87qhv" (OuterVolumeSpecName: "kube-api-access-87qhv") pod "45374a1a-cee6-4d53-8643-e185d317425c" (UID: "45374a1a-cee6-4d53-8643-e185d317425c"). InnerVolumeSpecName "kube-api-access-87qhv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.756451 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-config" (OuterVolumeSpecName: "config") pod "45374a1a-cee6-4d53-8643-e185d317425c" (UID: "45374a1a-cee6-4d53-8643-e185d317425c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.757730 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "45374a1a-cee6-4d53-8643-e185d317425c" (UID: "45374a1a-cee6-4d53-8643-e185d317425c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.763290 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "45374a1a-cee6-4d53-8643-e185d317425c" (UID: "45374a1a-cee6-4d53-8643-e185d317425c"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.766992 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "45374a1a-cee6-4d53-8643-e185d317425c" (UID: "45374a1a-cee6-4d53-8643-e185d317425c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.814193 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.814233 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.814246 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.814257 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/45374a1a-cee6-4d53-8643-e185d317425c-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:48 crc kubenswrapper[4871]: I1126 05:42:48.814270 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87qhv\" (UniqueName: \"kubernetes.io/projected/45374a1a-cee6-4d53-8643-e185d317425c-kube-api-access-87qhv\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:49 crc kubenswrapper[4871]: I1126 05:42:49.588165 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" event={"ID":"45374a1a-cee6-4d53-8643-e185d317425c","Type":"ContainerDied","Data":"4b93502a10967a0932b4359ea76cba35182f47a0a22d86eab0053b65b4b5e525"} Nov 26 05:42:49 crc kubenswrapper[4871]: I1126 05:42:49.588216 4871 scope.go:117] "RemoveContainer" containerID="49f07507965162e0c729182081ec6ddd43e73e586894f8531b291f7a63fea130" Nov 26 05:42:49 crc kubenswrapper[4871]: I1126 05:42:49.588316 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6dbf544cc9-jksqz" Nov 26 05:42:49 crc kubenswrapper[4871]: I1126 05:42:49.607726 4871 scope.go:117] "RemoveContainer" containerID="fa1e052a86550a87879cd7603a6d28c7911cb9d22b21c4d059d3d0f3c9bbefe1" Nov 26 05:42:49 crc kubenswrapper[4871]: I1126 05:42:49.626547 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dbf544cc9-jksqz"] Nov 26 05:42:49 crc kubenswrapper[4871]: I1126 05:42:49.636669 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6dbf544cc9-jksqz"] Nov 26 05:42:50 crc kubenswrapper[4871]: I1126 05:42:50.527634 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45374a1a-cee6-4d53-8643-e185d317425c" path="/var/lib/kubelet/pods/45374a1a-cee6-4d53-8643-e185d317425c/volumes" Nov 26 05:42:51 crc kubenswrapper[4871]: I1126 05:42:51.734762 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-m255d" podUID="de8a947b-6c51-4c33-b221-ea16d851bafb" containerName="ovn-controller" probeResult="failure" output=< Nov 26 05:42:51 crc kubenswrapper[4871]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 05:42:51 crc kubenswrapper[4871]: > Nov 26 05:42:53 crc kubenswrapper[4871]: I1126 05:42:53.929860 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:53 crc kubenswrapper[4871]: I1126 05:42:53.935420 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:54 crc kubenswrapper[4871]: I1126 05:42:54.657070 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:54 crc kubenswrapper[4871]: I1126 05:42:54.708572 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:42:54 crc kubenswrapper[4871]: E1126 05:42:54.708861 4871 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 05:42:54 crc kubenswrapper[4871]: E1126 05:42:54.708892 4871 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 05:42:54 crc kubenswrapper[4871]: E1126 05:42:54.708958 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift podName:c927c3b8-9d32-4cbb-97cc-d834a6e225c1 nodeName:}" failed. No retries permitted until 2025-11-26 05:43:10.708934888 +0000 UTC m=+1048.891986514 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift") pod "swift-storage-0" (UID: "c927c3b8-9d32-4cbb-97cc-d834a6e225c1") : configmap "swift-ring-files" not found Nov 26 05:42:55 crc kubenswrapper[4871]: I1126 05:42:55.663546 4871 generic.go:334] "Generic (PLEG): container finished" podID="bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d" containerID="02262207fca42021f1b9f3242307b877a6704d999af11f48a032b093dd15ac64" exitCode=0 Nov 26 05:42:55 crc kubenswrapper[4871]: I1126 05:42:55.663632 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qtnpf" event={"ID":"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d","Type":"ContainerDied","Data":"02262207fca42021f1b9f3242307b877a6704d999af11f48a032b093dd15ac64"} Nov 26 05:42:56 crc kubenswrapper[4871]: I1126 05:42:56.523118 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 05:42:56 crc kubenswrapper[4871]: I1126 05:42:56.670585 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="prometheus" containerID="cri-o://022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a" gracePeriod=600 Nov 26 05:42:56 crc kubenswrapper[4871]: I1126 05:42:56.670694 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="thanos-sidecar" containerID="cri-o://359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50" gracePeriod=600 Nov 26 05:42:56 crc kubenswrapper[4871]: I1126 05:42:56.670712 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="config-reloader" containerID="cri-o://914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983" gracePeriod=600 Nov 26 05:42:56 crc kubenswrapper[4871]: I1126 05:42:56.722464 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-m255d" podUID="de8a947b-6c51-4c33-b221-ea16d851bafb" containerName="ovn-controller" probeResult="failure" output=< Nov 26 05:42:56 crc kubenswrapper[4871]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 05:42:56 crc kubenswrapper[4871]: > Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.107385 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.215469 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.249691 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-dispersionconf\") pod \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.249749 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-scripts\") pod \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.249919 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-ring-data-devices\") pod \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.249983 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbsc2\" (UniqueName: \"kubernetes.io/projected/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-kube-api-access-mbsc2\") pod \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.250029 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-combined-ca-bundle\") pod \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.250049 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-etc-swift\") pod \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.250492 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-swiftconf\") pod \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\" (UID: \"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.250732 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d" (UID: "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.251654 4871 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.255854 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-kube-api-access-mbsc2" (OuterVolumeSpecName: "kube-api-access-mbsc2") pod "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d" (UID: "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d"). InnerVolumeSpecName "kube-api-access-mbsc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.260865 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d" (UID: "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.265444 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d" (UID: "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.273339 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-scripts" (OuterVolumeSpecName: "scripts") pod "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d" (UID: "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.281728 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d" (UID: "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.283744 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d" (UID: "bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.352440 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-tls-assets\") pod \"25606939-d595-4bfc-aead-c40883fdae31\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.352503 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-web-config\") pod \"25606939-d595-4bfc-aead-c40883fdae31\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.352670 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"25606939-d595-4bfc-aead-c40883fdae31\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.352723 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grwwg\" (UniqueName: \"kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-kube-api-access-grwwg\") pod \"25606939-d595-4bfc-aead-c40883fdae31\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.352750 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-config\") pod \"25606939-d595-4bfc-aead-c40883fdae31\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.352800 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/25606939-d595-4bfc-aead-c40883fdae31-config-out\") pod \"25606939-d595-4bfc-aead-c40883fdae31\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.352828 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-thanos-prometheus-http-client-file\") pod \"25606939-d595-4bfc-aead-c40883fdae31\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.352861 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/25606939-d595-4bfc-aead-c40883fdae31-prometheus-metric-storage-rulefiles-0\") pod \"25606939-d595-4bfc-aead-c40883fdae31\" (UID: \"25606939-d595-4bfc-aead-c40883fdae31\") " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.353205 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbsc2\" (UniqueName: \"kubernetes.io/projected/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-kube-api-access-mbsc2\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.353223 4871 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 
05:42:57.353232 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.353241 4871 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.353249 4871 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.353258 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.353552 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25606939-d595-4bfc-aead-c40883fdae31-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "25606939-d595-4bfc-aead-c40883fdae31" (UID: "25606939-d595-4bfc-aead-c40883fdae31"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.355589 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-kube-api-access-grwwg" (OuterVolumeSpecName: "kube-api-access-grwwg") pod "25606939-d595-4bfc-aead-c40883fdae31" (UID: "25606939-d595-4bfc-aead-c40883fdae31"). InnerVolumeSpecName "kube-api-access-grwwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.356292 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "25606939-d595-4bfc-aead-c40883fdae31" (UID: "25606939-d595-4bfc-aead-c40883fdae31"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.359920 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/25606939-d595-4bfc-aead-c40883fdae31-config-out" (OuterVolumeSpecName: "config-out") pod "25606939-d595-4bfc-aead-c40883fdae31" (UID: "25606939-d595-4bfc-aead-c40883fdae31"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.361780 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "25606939-d595-4bfc-aead-c40883fdae31" (UID: "25606939-d595-4bfc-aead-c40883fdae31"). InnerVolumeSpecName "thanos-prometheus-http-client-file". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.361941 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-config" (OuterVolumeSpecName: "config") pod "25606939-d595-4bfc-aead-c40883fdae31" (UID: "25606939-d595-4bfc-aead-c40883fdae31"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.386390 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "25606939-d595-4bfc-aead-c40883fdae31" (UID: "25606939-d595-4bfc-aead-c40883fdae31"). InnerVolumeSpecName "pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.387942 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-web-config" (OuterVolumeSpecName: "web-config") pod "25606939-d595-4bfc-aead-c40883fdae31" (UID: "25606939-d595-4bfc-aead-c40883fdae31"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.455317 4871 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") on node \"crc\" " Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.455363 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grwwg\" (UniqueName: \"kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-kube-api-access-grwwg\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.455377 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.455387 4871 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/25606939-d595-4bfc-aead-c40883fdae31-config-out\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.455401 4871 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.455417 4871 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/25606939-d595-4bfc-aead-c40883fdae31-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.455429 4871 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/25606939-d595-4bfc-aead-c40883fdae31-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.455442 4871 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: 
\"kubernetes.io/secret/25606939-d595-4bfc-aead-c40883fdae31-web-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.474293 4871 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.474448 4871 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9") on node "crc" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.556609 4871 reconciler_common.go:293] "Volume detached for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") on node \"crc\" DevicePath \"\"" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.683003 4871 generic.go:334] "Generic (PLEG): container finished" podID="25606939-d595-4bfc-aead-c40883fdae31" containerID="359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50" exitCode=0 Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.683077 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.683089 4871 generic.go:334] "Generic (PLEG): container finished" podID="25606939-d595-4bfc-aead-c40883fdae31" containerID="914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983" exitCode=0 Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.683167 4871 generic.go:334] "Generic (PLEG): container finished" podID="25606939-d595-4bfc-aead-c40883fdae31" containerID="022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a" exitCode=0 Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.683071 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"25606939-d595-4bfc-aead-c40883fdae31","Type":"ContainerDied","Data":"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50"} Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.683289 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"25606939-d595-4bfc-aead-c40883fdae31","Type":"ContainerDied","Data":"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983"} Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.683354 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"25606939-d595-4bfc-aead-c40883fdae31","Type":"ContainerDied","Data":"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a"} Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.683374 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"25606939-d595-4bfc-aead-c40883fdae31","Type":"ContainerDied","Data":"60ba71b9378e1b112d33eaa60fc405b0daad5296a45194e8ce4fd351c000d16d"} Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.683412 4871 scope.go:117] "RemoveContainer" containerID="359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.686324 4871 generic.go:334] "Generic (PLEG): container finished" podID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" containerID="7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927" exitCode=0 Nov 26 
05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.686383 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b3f9dfba-a3a9-45ef-a96c-91c654671b97","Type":"ContainerDied","Data":"7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927"} Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.690107 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-qtnpf" event={"ID":"bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d","Type":"ContainerDied","Data":"0850eb4751d8434ea409f941aed0ea0ec0abbd1f4a701643db6a50dce437b513"} Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.690140 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-qtnpf" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.690155 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0850eb4751d8434ea409f941aed0ea0ec0abbd1f4a701643db6a50dce437b513" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.715317 4871 scope.go:117] "RemoveContainer" containerID="914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.744830 4871 scope.go:117] "RemoveContainer" containerID="022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.775467 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.784931 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.788566 4871 scope.go:117] "RemoveContainer" containerID="b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799038 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 05:42:57 crc kubenswrapper[4871]: E1126 05:42:57.799454 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45374a1a-cee6-4d53-8643-e185d317425c" containerName="dnsmasq-dns" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799476 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="45374a1a-cee6-4d53-8643-e185d317425c" containerName="dnsmasq-dns" Nov 26 05:42:57 crc kubenswrapper[4871]: E1126 05:42:57.799490 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="init-config-reloader" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799500 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="init-config-reloader" Nov 26 05:42:57 crc kubenswrapper[4871]: E1126 05:42:57.799545 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="config-reloader" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799555 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="config-reloader" Nov 26 05:42:57 crc kubenswrapper[4871]: E1126 05:42:57.799567 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="prometheus" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799576 4871 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="prometheus" Nov 26 05:42:57 crc kubenswrapper[4871]: E1126 05:42:57.799602 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="thanos-sidecar" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799609 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="thanos-sidecar" Nov 26 05:42:57 crc kubenswrapper[4871]: E1126 05:42:57.799623 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d" containerName="swift-ring-rebalance" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799631 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d" containerName="swift-ring-rebalance" Nov 26 05:42:57 crc kubenswrapper[4871]: E1126 05:42:57.799641 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45374a1a-cee6-4d53-8643-e185d317425c" containerName="init" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799650 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="45374a1a-cee6-4d53-8643-e185d317425c" containerName="init" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799879 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="prometheus" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799908 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="thanos-sidecar" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799924 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d" containerName="swift-ring-rebalance" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799943 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="25606939-d595-4bfc-aead-c40883fdae31" containerName="config-reloader" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.799967 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="45374a1a-cee6-4d53-8643-e185d317425c" containerName="dnsmasq-dns" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.802049 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.808264 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.808507 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.808699 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-qb6hl" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.808854 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.809068 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.809229 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.813940 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.831151 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.909337 4871 scope.go:117] "RemoveContainer" containerID="359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50" Nov 26 05:42:57 crc kubenswrapper[4871]: E1126 05:42:57.909787 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50\": container with ID starting with 359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50 not found: ID does not exist" containerID="359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.909825 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50"} err="failed to get container status \"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50\": rpc error: code = NotFound desc = could not find container \"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50\": container with ID starting with 359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50 not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.909868 4871 scope.go:117] "RemoveContainer" containerID="914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983" Nov 26 05:42:57 crc kubenswrapper[4871]: E1126 05:42:57.910179 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983\": container with ID starting with 914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983 not found: ID does not exist" containerID="914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.910312 4871 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983"} err="failed to get container status \"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983\": rpc error: code = NotFound desc = could not find container \"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983\": container with ID starting with 914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983 not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.910426 4871 scope.go:117] "RemoveContainer" containerID="022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a" Nov 26 05:42:57 crc kubenswrapper[4871]: E1126 05:42:57.910832 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a\": container with ID starting with 022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a not found: ID does not exist" containerID="022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.910916 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a"} err="failed to get container status \"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a\": rpc error: code = NotFound desc = could not find container \"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a\": container with ID starting with 022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.911002 4871 scope.go:117] "RemoveContainer" containerID="b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f" Nov 26 05:42:57 crc kubenswrapper[4871]: E1126 05:42:57.911461 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f\": container with ID starting with b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f not found: ID does not exist" containerID="b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.911562 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f"} err="failed to get container status \"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f\": rpc error: code = NotFound desc = could not find container \"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f\": container with ID starting with b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.911584 4871 scope.go:117] "RemoveContainer" containerID="359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.911896 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50"} err="failed to get container status \"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50\": rpc error: code = NotFound desc = could not find container 
\"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50\": container with ID starting with 359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50 not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.911996 4871 scope.go:117] "RemoveContainer" containerID="914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.912627 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983"} err="failed to get container status \"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983\": rpc error: code = NotFound desc = could not find container \"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983\": container with ID starting with 914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983 not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.912649 4871 scope.go:117] "RemoveContainer" containerID="022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.917094 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a"} err="failed to get container status \"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a\": rpc error: code = NotFound desc = could not find container \"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a\": container with ID starting with 022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.917225 4871 scope.go:117] "RemoveContainer" containerID="b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.917683 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f"} err="failed to get container status \"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f\": rpc error: code = NotFound desc = could not find container \"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f\": container with ID starting with b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.917725 4871 scope.go:117] "RemoveContainer" containerID="359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.917986 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50"} err="failed to get container status \"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50\": rpc error: code = NotFound desc = could not find container \"359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50\": container with ID starting with 359caf6a4ba5b6ef205778bcd4024ddc662b325303d2969ea9979a35799e9d50 not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.918074 4871 scope.go:117] "RemoveContainer" containerID="914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.918351 4871 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983"} err="failed to get container status \"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983\": rpc error: code = NotFound desc = could not find container \"914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983\": container with ID starting with 914dbad179d75235b029ab2b57b36b1683be4ff4b89ace19f092f592d4600983 not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.918454 4871 scope.go:117] "RemoveContainer" containerID="022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.918838 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a"} err="failed to get container status \"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a\": rpc error: code = NotFound desc = could not find container \"022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a\": container with ID starting with 022fe0cb9c527f4427d5cfbb135d965667c4aec8ae1f1552b12c8844c979c20a not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.918920 4871 scope.go:117] "RemoveContainer" containerID="b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.919456 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f"} err="failed to get container status \"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f\": rpc error: code = NotFound desc = could not find container \"b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f\": container with ID starting with b4750d39a182d7d79e868fb6056c4674dcba58cd98054dc3917b9ff623bc1c3f not found: ID does not exist" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.926872 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.964164 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.964470 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.964590 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbtnm\" (UniqueName: \"kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-kube-api-access-dbtnm\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 
05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.964720 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.964851 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a58d8ebe-c4cb-45c2-8529-d1094ee56518-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.964977 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.965075 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.965192 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.965327 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.965431 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:57 crc kubenswrapper[4871]: I1126 05:42:57.965592 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.068313 4871 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.068676 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbtnm\" (UniqueName: \"kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-kube-api-access-dbtnm\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.068795 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.068907 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a58d8ebe-c4cb-45c2-8529-d1094ee56518-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.069593 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a58d8ebe-c4cb-45c2-8529-d1094ee56518-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.069709 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.069802 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.070242 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.070737 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: 
\"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.070858 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.070991 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.071096 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.074120 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.074341 4871 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.074388 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/411f5d2e0132cbadcdbc80898abccb6eaaa272fad7576dd15cdb4f42514f558a/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.074895 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.075980 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.083789 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.084245 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.084586 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.084828 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.088115 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.109750 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-dbtnm\" (UniqueName: \"kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-kube-api-access-dbtnm\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.121274 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:58 crc kubenswrapper[4871]: I1126 05:42:58.257423 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:58.522113 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25606939-d595-4bfc-aead-c40883fdae31" path="/var/lib/kubelet/pods/25606939-d595-4bfc-aead-c40883fdae31/volumes" Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:58.698187 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b3f9dfba-a3a9-45ef-a96c-91c654671b97","Type":"ContainerStarted","Data":"513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54"} Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:58.699154 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:58.704480 4871 generic.go:334] "Generic (PLEG): container finished" podID="7df95f1b-7a5b-445e-bb56-b17695a0bde9" containerID="79364121dfc1db6ae8045d060d96f8aa6dcc8b206d819a10ae064a1efe7325b7" exitCode=0 Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:58.704564 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"7df95f1b-7a5b-445e-bb56-b17695a0bde9","Type":"ContainerDied","Data":"79364121dfc1db6ae8045d060d96f8aa6dcc8b206d819a10ae064a1efe7325b7"} Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:58.739188 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.632889386 podStartE2EDuration="1m28.739163916s" podCreationTimestamp="2025-11-26 05:41:30 +0000 UTC" firstStartedPulling="2025-11-26 05:41:32.592585543 +0000 UTC m=+950.775637129" lastFinishedPulling="2025-11-26 05:42:23.698860073 +0000 UTC m=+1001.881911659" observedRunningTime="2025-11-26 05:42:58.726709741 +0000 UTC m=+1036.909761337" watchObservedRunningTime="2025-11-26 05:42:58.739163916 +0000 UTC m=+1036.922215542" Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:58.764323 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 05:42:59 crc kubenswrapper[4871]: W1126 05:42:58.768006 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda58d8ebe_c4cb_45c2_8529_d1094ee56518.slice/crio-a91c0dd1437315a9192a3816f1120ec7ab4a29a700016858c01ca509aca45f4e WatchSource:0}: Error finding container a91c0dd1437315a9192a3816f1120ec7ab4a29a700016858c01ca509aca45f4e: Status 404 returned error can't find the container with id a91c0dd1437315a9192a3816f1120ec7ab4a29a700016858c01ca509aca45f4e Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:59.718869 
4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-notifications-server-0" event={"ID":"7df95f1b-7a5b-445e-bb56-b17695a0bde9","Type":"ContainerStarted","Data":"d1165fd23d11b48f6bf17b446719bb72d9a157591abfd86ad19914744d381a06"} Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:59.719600 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-notifications-server-0" Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:59.720120 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a58d8ebe-c4cb-45c2-8529-d1094ee56518","Type":"ContainerStarted","Data":"a91c0dd1437315a9192a3816f1120ec7ab4a29a700016858c01ca509aca45f4e"} Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:59.721999 4871 generic.go:334] "Generic (PLEG): container finished" podID="4ba97673-d74c-47df-acae-f2dcc1ed10df" containerID="d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760" exitCode=0 Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:59.722066 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4ba97673-d74c-47df-acae-f2dcc1ed10df","Type":"ContainerDied","Data":"d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760"} Nov 26 05:42:59 crc kubenswrapper[4871]: I1126 05:42:59.754494 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-notifications-server-0" podStartSLOduration=-9223371947.100315 podStartE2EDuration="1m29.754460145s" podCreationTimestamp="2025-11-26 05:41:30 +0000 UTC" firstStartedPulling="2025-11-26 05:41:32.860022315 +0000 UTC m=+951.043073911" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:42:59.752592988 +0000 UTC m=+1037.935644574" watchObservedRunningTime="2025-11-26 05:42:59.754460145 +0000 UTC m=+1037.937511771" Nov 26 05:43:00 crc kubenswrapper[4871]: I1126 05:43:00.731566 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4ba97673-d74c-47df-acae-f2dcc1ed10df","Type":"ContainerStarted","Data":"2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f"} Nov 26 05:43:00 crc kubenswrapper[4871]: I1126 05:43:00.732105 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 26 05:43:00 crc kubenswrapper[4871]: I1126 05:43:00.765648 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371946.089151 podStartE2EDuration="1m30.7656252s" podCreationTimestamp="2025-11-26 05:41:30 +0000 UTC" firstStartedPulling="2025-11-26 05:41:32.384789434 +0000 UTC m=+950.567841020" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:43:00.761579399 +0000 UTC m=+1038.944630985" watchObservedRunningTime="2025-11-26 05:43:00.7656252 +0000 UTC m=+1038.948676826" Nov 26 05:43:01 crc kubenswrapper[4871]: I1126 05:43:01.734784 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-m255d" podUID="de8a947b-6c51-4c33-b221-ea16d851bafb" containerName="ovn-controller" probeResult="failure" output=< Nov 26 05:43:01 crc kubenswrapper[4871]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 05:43:01 crc kubenswrapper[4871]: > Nov 26 05:43:01 crc kubenswrapper[4871]: I1126 05:43:01.746978 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/prometheus-metric-storage-0" event={"ID":"a58d8ebe-c4cb-45c2-8529-d1094ee56518","Type":"ContainerStarted","Data":"27e567f8947af13e33825131fd9bc0fbd300ba7609775a78622bf80250518c7c"} Nov 26 05:43:01 crc kubenswrapper[4871]: I1126 05:43:01.787787 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:43:01 crc kubenswrapper[4871]: I1126 05:43:01.791561 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-t9t82" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.040311 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-m255d-config-bszrt"] Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.042278 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.044410 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.046417 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-log-ovn\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.046495 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvhhd\" (UniqueName: \"kubernetes.io/projected/c755f62c-0406-4201-aff8-b3f33a277501-kube-api-access-bvhhd\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.046568 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-scripts\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.046683 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run-ovn\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.046755 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-additional-scripts\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.046812 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " 
pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.051273 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-m255d-config-bszrt"] Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.148094 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-additional-scripts\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.148179 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.148247 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-log-ovn\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.148281 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvhhd\" (UniqueName: \"kubernetes.io/projected/c755f62c-0406-4201-aff8-b3f33a277501-kube-api-access-bvhhd\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.148342 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-scripts\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.148433 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run-ovn\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.148573 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run-ovn\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.148581 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.148642 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-log-ovn\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.149103 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-additional-scripts\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.150787 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-scripts\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.177697 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvhhd\" (UniqueName: \"kubernetes.io/projected/c755f62c-0406-4201-aff8-b3f33a277501-kube-api-access-bvhhd\") pod \"ovn-controller-m255d-config-bszrt\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.364424 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:02 crc kubenswrapper[4871]: I1126 05:43:02.795055 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-m255d-config-bszrt"] Nov 26 05:43:02 crc kubenswrapper[4871]: W1126 05:43:02.807703 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc755f62c_0406_4201_aff8_b3f33a277501.slice/crio-67bd6f7b54124dbb90b36e024e8aa6fcc2ca1971311f9d29ea5f50ff34ec8f1b WatchSource:0}: Error finding container 67bd6f7b54124dbb90b36e024e8aa6fcc2ca1971311f9d29ea5f50ff34ec8f1b: Status 404 returned error can't find the container with id 67bd6f7b54124dbb90b36e024e8aa6fcc2ca1971311f9d29ea5f50ff34ec8f1b Nov 26 05:43:03 crc kubenswrapper[4871]: I1126 05:43:03.763983 4871 generic.go:334] "Generic (PLEG): container finished" podID="c755f62c-0406-4201-aff8-b3f33a277501" containerID="7f8ee206d672f505d17cead024215e90be09e4e8f2e5ff42e1513630e20112bc" exitCode=0 Nov 26 05:43:03 crc kubenswrapper[4871]: I1126 05:43:03.764130 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-m255d-config-bszrt" event={"ID":"c755f62c-0406-4201-aff8-b3f33a277501","Type":"ContainerDied","Data":"7f8ee206d672f505d17cead024215e90be09e4e8f2e5ff42e1513630e20112bc"} Nov 26 05:43:03 crc kubenswrapper[4871]: I1126 05:43:03.764443 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-m255d-config-bszrt" event={"ID":"c755f62c-0406-4201-aff8-b3f33a277501","Type":"ContainerStarted","Data":"67bd6f7b54124dbb90b36e024e8aa6fcc2ca1971311f9d29ea5f50ff34ec8f1b"} Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.115060 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.201808 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvhhd\" (UniqueName: \"kubernetes.io/projected/c755f62c-0406-4201-aff8-b3f33a277501-kube-api-access-bvhhd\") pod \"c755f62c-0406-4201-aff8-b3f33a277501\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.201936 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-log-ovn\") pod \"c755f62c-0406-4201-aff8-b3f33a277501\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.202064 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "c755f62c-0406-4201-aff8-b3f33a277501" (UID: "c755f62c-0406-4201-aff8-b3f33a277501"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.202091 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-scripts\") pod \"c755f62c-0406-4201-aff8-b3f33a277501\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.202256 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run-ovn\") pod \"c755f62c-0406-4201-aff8-b3f33a277501\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.202300 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-additional-scripts\") pod \"c755f62c-0406-4201-aff8-b3f33a277501\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.202350 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run\") pod \"c755f62c-0406-4201-aff8-b3f33a277501\" (UID: \"c755f62c-0406-4201-aff8-b3f33a277501\") " Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.202358 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "c755f62c-0406-4201-aff8-b3f33a277501" (UID: "c755f62c-0406-4201-aff8-b3f33a277501"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.202488 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run" (OuterVolumeSpecName: "var-run") pod "c755f62c-0406-4201-aff8-b3f33a277501" (UID: "c755f62c-0406-4201-aff8-b3f33a277501"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.203049 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "c755f62c-0406-4201-aff8-b3f33a277501" (UID: "c755f62c-0406-4201-aff8-b3f33a277501"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.203153 4871 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.203180 4871 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.203194 4871 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c755f62c-0406-4201-aff8-b3f33a277501-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.203392 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-scripts" (OuterVolumeSpecName: "scripts") pod "c755f62c-0406-4201-aff8-b3f33a277501" (UID: "c755f62c-0406-4201-aff8-b3f33a277501"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.209953 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c755f62c-0406-4201-aff8-b3f33a277501-kube-api-access-bvhhd" (OuterVolumeSpecName: "kube-api-access-bvhhd") pod "c755f62c-0406-4201-aff8-b3f33a277501" (UID: "c755f62c-0406-4201-aff8-b3f33a277501"). InnerVolumeSpecName "kube-api-access-bvhhd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.304692 4871 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.304726 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvhhd\" (UniqueName: \"kubernetes.io/projected/c755f62c-0406-4201-aff8-b3f33a277501-kube-api-access-bvhhd\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.304741 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c755f62c-0406-4201-aff8-b3f33a277501-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.792877 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-m255d-config-bszrt" event={"ID":"c755f62c-0406-4201-aff8-b3f33a277501","Type":"ContainerDied","Data":"67bd6f7b54124dbb90b36e024e8aa6fcc2ca1971311f9d29ea5f50ff34ec8f1b"} Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.793374 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67bd6f7b54124dbb90b36e024e8aa6fcc2ca1971311f9d29ea5f50ff34ec8f1b" Nov 26 05:43:05 crc kubenswrapper[4871]: I1126 05:43:05.792977 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-m255d-config-bszrt" Nov 26 05:43:06 crc kubenswrapper[4871]: I1126 05:43:06.254316 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-m255d-config-bszrt"] Nov 26 05:43:06 crc kubenswrapper[4871]: I1126 05:43:06.264465 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-m255d-config-bszrt"] Nov 26 05:43:06 crc kubenswrapper[4871]: I1126 05:43:06.522514 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c755f62c-0406-4201-aff8-b3f33a277501" path="/var/lib/kubelet/pods/c755f62c-0406-4201-aff8-b3f33a277501/volumes" Nov 26 05:43:06 crc kubenswrapper[4871]: I1126 05:43:06.743234 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-m255d" Nov 26 05:43:08 crc kubenswrapper[4871]: E1126 05:43:08.450609 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda58d8ebe_c4cb_45c2_8529_d1094ee56518.slice/crio-conmon-27e567f8947af13e33825131fd9bc0fbd300ba7609775a78622bf80250518c7c.scope\": RecentStats: unable to find data in memory cache]" Nov 26 05:43:08 crc kubenswrapper[4871]: I1126 05:43:08.821414 4871 generic.go:334] "Generic (PLEG): container finished" podID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerID="27e567f8947af13e33825131fd9bc0fbd300ba7609775a78622bf80250518c7c" exitCode=0 Nov 26 05:43:08 crc kubenswrapper[4871]: I1126 05:43:08.821473 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a58d8ebe-c4cb-45c2-8529-d1094ee56518","Type":"ContainerDied","Data":"27e567f8947af13e33825131fd9bc0fbd300ba7609775a78622bf80250518c7c"} Nov 26 05:43:09 crc kubenswrapper[4871]: I1126 05:43:09.834048 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"a58d8ebe-c4cb-45c2-8529-d1094ee56518","Type":"ContainerStarted","Data":"828d8d5299d261ae0bc7ff55dd4131d3f68e2eb13f1abbc97d7c4a29df22da69"} Nov 26 05:43:10 crc kubenswrapper[4871]: I1126 05:43:10.808582 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:43:10 crc kubenswrapper[4871]: I1126 05:43:10.865497 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c927c3b8-9d32-4cbb-97cc-d834a6e225c1-etc-swift\") pod \"swift-storage-0\" (UID: \"c927c3b8-9d32-4cbb-97cc-d834a6e225c1\") " pod="openstack/swift-storage-0" Nov 26 05:43:10 crc kubenswrapper[4871]: I1126 05:43:10.988008 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 05:43:11 crc kubenswrapper[4871]: I1126 05:43:11.602092 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 05:43:11 crc kubenswrapper[4871]: W1126 05:43:11.620705 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc927c3b8_9d32_4cbb_97cc_d834a6e225c1.slice/crio-0cd72ccb275d0ee91dda7fbf6864277086823ba9db4616f3b736b77bb99fa5da WatchSource:0}: Error finding container 0cd72ccb275d0ee91dda7fbf6864277086823ba9db4616f3b736b77bb99fa5da: Status 404 returned error can't find the container with id 0cd72ccb275d0ee91dda7fbf6864277086823ba9db4616f3b736b77bb99fa5da Nov 26 05:43:11 crc kubenswrapper[4871]: I1126 05:43:11.799388 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="4ba97673-d74c-47df-acae-f2dcc1ed10df" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.109:5671: connect: connection refused" Nov 26 05:43:11 crc kubenswrapper[4871]: I1126 05:43:11.850865 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"0cd72ccb275d0ee91dda7fbf6864277086823ba9db4616f3b736b77bb99fa5da"} Nov 26 05:43:12 crc kubenswrapper[4871]: I1126 05:43:12.083604 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.110:5671: connect: connection refused" Nov 26 05:43:12 crc kubenswrapper[4871]: I1126 05:43:12.351684 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-notifications-server-0" podUID="7df95f1b-7a5b-445e-bb56-b17695a0bde9" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.111:5671: connect: connection refused" Nov 26 05:43:12 crc kubenswrapper[4871]: I1126 05:43:12.863837 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a58d8ebe-c4cb-45c2-8529-d1094ee56518","Type":"ContainerStarted","Data":"c91093b08ccb0bf438dac84877b6239ad13e3ef34f3e327645ffe7ea7c5763f8"} Nov 26 05:43:12 crc kubenswrapper[4871]: I1126 05:43:12.863888 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"a58d8ebe-c4cb-45c2-8529-d1094ee56518","Type":"ContainerStarted","Data":"4b244c43246bcc6d93fa6e6ab5c1c8994ef3e0285f170a7812f3854671280764"} Nov 26 05:43:12 crc kubenswrapper[4871]: I1126 05:43:12.874867 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"82fdeb3fed4d6335e6666b02e9acef338edf012ed8954a8a198efea224df7894"} Nov 26 05:43:12 crc kubenswrapper[4871]: I1126 05:43:12.931887 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=15.931861955 podStartE2EDuration="15.931861955s" podCreationTimestamp="2025-11-26 05:42:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:43:12.928940382 +0000 UTC m=+1051.111992018" watchObservedRunningTime="2025-11-26 05:43:12.931861955 +0000 UTC m=+1051.114913551" Nov 26 05:43:13 crc kubenswrapper[4871]: I1126 05:43:13.258334 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 26 05:43:13 crc kubenswrapper[4871]: I1126 05:43:13.258711 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 26 05:43:13 crc kubenswrapper[4871]: I1126 05:43:13.266127 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 26 05:43:13 crc kubenswrapper[4871]: I1126 05:43:13.888336 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"5072428ffa2ae6c3d880f2c7d814451cf8a9c3f5c5cf5b5aaf9cb22768fd1f24"} Nov 26 05:43:13 crc kubenswrapper[4871]: I1126 05:43:13.888395 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"cc7e5798a8de83bee9f008a22f4ea64336a04a699253ce892675772ef34f0aa0"} Nov 26 05:43:13 crc kubenswrapper[4871]: I1126 05:43:13.888405 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"ccca21e721a7ff7e3a100d52dfafd01ae6a646a1e6bce079a79df0614ba1f05e"} Nov 26 05:43:13 crc kubenswrapper[4871]: I1126 05:43:13.893877 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 26 05:43:14 crc kubenswrapper[4871]: I1126 05:43:14.903589 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"b946f1474e7044a6026d4bb8266c791bd94f05664f561021f9eb189c6c740e68"} Nov 26 05:43:14 crc kubenswrapper[4871]: I1126 05:43:14.903905 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"4ef6715ef223db1778a6bc236da0ed4537fa4c688e9944dd11d453ea45e3acc5"} Nov 26 05:43:14 crc kubenswrapper[4871]: I1126 05:43:14.903917 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"96c0f0e4e757540abebce6a266931fb8e8482697e324317f065f9994fc764b62"} Nov 26 05:43:14 crc kubenswrapper[4871]: I1126 05:43:14.903926 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"e7a606db23784f31b99711bb766fc4b14aa91e19b310fa5134c108714503f0b3"} Nov 26 05:43:16 crc kubenswrapper[4871]: I1126 05:43:16.950077 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"2512cefc9f0dec81d307114059f4f055c43681f0dffac5dbd773263d400cac1b"} Nov 26 05:43:16 crc kubenswrapper[4871]: I1126 05:43:16.950641 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"b600c9e487e6a4f207f68f0033b2decbc4e5c9d92b6e920d64e07ba8b814774b"} Nov 26 05:43:16 crc kubenswrapper[4871]: I1126 05:43:16.950655 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"b2c49786be7a8c3bb9898e7ee84e2de6cca0aba81447cd40522bbdae87b78714"} Nov 26 05:43:17 crc kubenswrapper[4871]: I1126 05:43:17.971157 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"e7d0050dff5349c843a02f2ad9d41dba3adfc66170aa36de3d52c7e60c89cffe"} Nov 26 05:43:17 crc kubenswrapper[4871]: I1126 05:43:17.971608 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"24cdb26e0ff19469b286f5aef65e04e3e9aebf323ce84c1710c5c5ef29199e0b"} Nov 26 05:43:17 crc kubenswrapper[4871]: I1126 05:43:17.971630 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"78720dc77e98205d1033d0aca060724f76aedbaf626a7ba4ef8081eb075135ab"} Nov 26 05:43:17 crc kubenswrapper[4871]: I1126 05:43:17.971645 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c927c3b8-9d32-4cbb-97cc-d834a6e225c1","Type":"ContainerStarted","Data":"093935e178175659b36744d6d77083c460c434c58075c36bcef8065ca5b9b877"} Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.023682 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=36.402741708 podStartE2EDuration="41.02366513s" podCreationTimestamp="2025-11-26 05:42:37 +0000 UTC" firstStartedPulling="2025-11-26 05:43:11.623043703 +0000 UTC m=+1049.806095289" lastFinishedPulling="2025-11-26 05:43:16.243967125 +0000 UTC m=+1054.427018711" observedRunningTime="2025-11-26 05:43:18.018340258 +0000 UTC m=+1056.201391834" watchObservedRunningTime="2025-11-26 05:43:18.02366513 +0000 UTC m=+1056.206716716" Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.306863 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55b99bf79c-8q6qq"] Nov 26 05:43:18 crc kubenswrapper[4871]: E1126 05:43:18.307220 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c755f62c-0406-4201-aff8-b3f33a277501" containerName="ovn-config" Nov 26 
05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.307234 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c755f62c-0406-4201-aff8-b3f33a277501" containerName="ovn-config"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.307410 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="c755f62c-0406-4201-aff8-b3f33a277501" containerName="ovn-config"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.308966 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.312880 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.320577 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55b99bf79c-8q6qq"]
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.448098 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-config\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.448229 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-svc\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.448261 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-sb\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.448341 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-swift-storage-0\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.448369 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vwf6\" (UniqueName: \"kubernetes.io/projected/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-kube-api-access-4vwf6\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.448391 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-nb\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.549674 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-svc\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.550054 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-sb\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.550167 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-swift-storage-0\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.550210 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vwf6\" (UniqueName: \"kubernetes.io/projected/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-kube-api-access-4vwf6\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.550256 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-nb\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.550388 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-config\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.550652 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-svc\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.551146 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-swift-storage-0\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.551163 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-sb\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.551408 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-nb\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.551605 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-config\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.570266 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vwf6\" (UniqueName: \"kubernetes.io/projected/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-kube-api-access-4vwf6\") pod \"dnsmasq-dns-55b99bf79c-8q6qq\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") " pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:18 crc kubenswrapper[4871]: I1126 05:43:18.632420 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:19 crc kubenswrapper[4871]: I1126 05:43:19.172132 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55b99bf79c-8q6qq"]
Nov 26 05:43:19 crc kubenswrapper[4871]: I1126 05:43:19.997521 4871 generic.go:334] "Generic (PLEG): container finished" podID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerID="4ed36338b1de980506adbc0c7934b42033af86f0054606599c6677f07e96669b" exitCode=0
Nov 26 05:43:19 crc kubenswrapper[4871]: I1126 05:43:19.997642 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" event={"ID":"bc4bffe3-86fd-478d-95cf-edf716bbf3f2","Type":"ContainerDied","Data":"4ed36338b1de980506adbc0c7934b42033af86f0054606599c6677f07e96669b"}
Nov 26 05:43:19 crc kubenswrapper[4871]: I1126 05:43:19.998151 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" event={"ID":"bc4bffe3-86fd-478d-95cf-edf716bbf3f2","Type":"ContainerStarted","Data":"8df039fbccdcda8291f39b42da1aa2ac67afbe2d4dd352237156f98ce5923d74"}
Nov 26 05:43:21 crc kubenswrapper[4871]: I1126 05:43:21.013239 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" event={"ID":"bc4bffe3-86fd-478d-95cf-edf716bbf3f2","Type":"ContainerStarted","Data":"4b3f7ef4dd996794249355cfc7b0eae92d7ebc4d4d9080ced156cb574b241e25"}
Nov 26 05:43:21 crc kubenswrapper[4871]: I1126 05:43:21.013996 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:43:21 crc kubenswrapper[4871]: I1126 05:43:21.055829 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" podStartSLOduration=3.05579571 podStartE2EDuration="3.05579571s" podCreationTimestamp="2025-11-26 05:43:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:43:21.041004794 +0000 UTC m=+1059.224056450" watchObservedRunningTime="2025-11-26 05:43:21.05579571 +0000 UTC m=+1059.238847336"
Nov 26 05:43:21 crc kubenswrapper[4871]: I1126 05:43:21.801769 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.084724 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.213415 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-a517-account-create-update-zhft2"]
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.214711 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a517-account-create-update-zhft2"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.218864 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.233416 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-a517-account-create-update-zhft2"]
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.298485 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-k6ddc"]
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.301987 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-k6ddc"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.313373 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-k6ddc"]
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.326151 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4666a76-36db-4a3e-a12d-0cfb82284f7a-operator-scripts\") pod \"barbican-a517-account-create-update-zhft2\" (UID: \"a4666a76-36db-4a3e-a12d-0cfb82284f7a\") " pod="openstack/barbican-a517-account-create-update-zhft2"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.326369 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmj87\" (UniqueName: \"kubernetes.io/projected/a4666a76-36db-4a3e-a12d-0cfb82284f7a-kube-api-access-mmj87\") pod \"barbican-a517-account-create-update-zhft2\" (UID: \"a4666a76-36db-4a3e-a12d-0cfb82284f7a\") " pod="openstack/barbican-a517-account-create-update-zhft2"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.352718 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-notifications-server-0"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.406544 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-kj54h"]
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.407822 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-kj54h"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.418823 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-6b92-account-create-update-hzv5c"]
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.420128 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-6b92-account-create-update-hzv5c"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.421639 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.450269 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4666a76-36db-4a3e-a12d-0cfb82284f7a-operator-scripts\") pod \"barbican-a517-account-create-update-zhft2\" (UID: \"a4666a76-36db-4a3e-a12d-0cfb82284f7a\") " pod="openstack/barbican-a517-account-create-update-zhft2"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.450370 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d791f62b-80ce-4ea9-acdc-bfb288614bac-operator-scripts\") pod \"barbican-db-create-k6ddc\" (UID: \"d791f62b-80ce-4ea9-acdc-bfb288614bac\") " pod="openstack/barbican-db-create-k6ddc"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.450426 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8bq4\" (UniqueName: \"kubernetes.io/projected/d791f62b-80ce-4ea9-acdc-bfb288614bac-kube-api-access-t8bq4\") pod \"barbican-db-create-k6ddc\" (UID: \"d791f62b-80ce-4ea9-acdc-bfb288614bac\") " pod="openstack/barbican-db-create-k6ddc"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.450456 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-operator-scripts\") pod \"cinder-6b92-account-create-update-hzv5c\" (UID: \"07bd5933-72a5-4f99-b5f9-08d8cadc77e8\") " pod="openstack/cinder-6b92-account-create-update-hzv5c"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.450492 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2b2h\" (UniqueName: \"kubernetes.io/projected/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-kube-api-access-x2b2h\") pod \"cinder-6b92-account-create-update-hzv5c\" (UID: \"07bd5933-72a5-4f99-b5f9-08d8cadc77e8\") " pod="openstack/cinder-6b92-account-create-update-hzv5c"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.450520 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmj87\" (UniqueName: \"kubernetes.io/projected/a4666a76-36db-4a3e-a12d-0cfb82284f7a-kube-api-access-mmj87\") pod \"barbican-a517-account-create-update-zhft2\" (UID: \"a4666a76-36db-4a3e-a12d-0cfb82284f7a\") " pod="openstack/barbican-a517-account-create-update-zhft2"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.450999 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4666a76-36db-4a3e-a12d-0cfb82284f7a-operator-scripts\") pod \"barbican-a517-account-create-update-zhft2\" (UID: \"a4666a76-36db-4a3e-a12d-0cfb82284f7a\") " pod="openstack/barbican-a517-account-create-update-zhft2"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.494392 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmj87\" (UniqueName: \"kubernetes.io/projected/a4666a76-36db-4a3e-a12d-0cfb82284f7a-kube-api-access-mmj87\") pod \"barbican-a517-account-create-update-zhft2\" (UID: \"a4666a76-36db-4a3e-a12d-0cfb82284f7a\") " pod="openstack/barbican-a517-account-create-update-zhft2"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.532779 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-kj54h"]
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.533305 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a517-account-create-update-zhft2"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.554503 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d791f62b-80ce-4ea9-acdc-bfb288614bac-operator-scripts\") pod \"barbican-db-create-k6ddc\" (UID: \"d791f62b-80ce-4ea9-acdc-bfb288614bac\") " pod="openstack/barbican-db-create-k6ddc"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.554588 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whz7b\" (UniqueName: \"kubernetes.io/projected/09f49677-5f6f-4144-9e29-4db96e4fcb1e-kube-api-access-whz7b\") pod \"cinder-db-create-kj54h\" (UID: \"09f49677-5f6f-4144-9e29-4db96e4fcb1e\") " pod="openstack/cinder-db-create-kj54h"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.554619 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8bq4\" (UniqueName: \"kubernetes.io/projected/d791f62b-80ce-4ea9-acdc-bfb288614bac-kube-api-access-t8bq4\") pod \"barbican-db-create-k6ddc\" (UID: \"d791f62b-80ce-4ea9-acdc-bfb288614bac\") " pod="openstack/barbican-db-create-k6ddc"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.554652 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-operator-scripts\") pod \"cinder-6b92-account-create-update-hzv5c\" (UID: \"07bd5933-72a5-4f99-b5f9-08d8cadc77e8\") " pod="openstack/cinder-6b92-account-create-update-hzv5c"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.554672 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09f49677-5f6f-4144-9e29-4db96e4fcb1e-operator-scripts\") pod \"cinder-db-create-kj54h\" (UID: \"09f49677-5f6f-4144-9e29-4db96e4fcb1e\") " pod="openstack/cinder-db-create-kj54h"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.554693 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2b2h\" (UniqueName: \"kubernetes.io/projected/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-kube-api-access-x2b2h\") pod \"cinder-6b92-account-create-update-hzv5c\" (UID: \"07bd5933-72a5-4f99-b5f9-08d8cadc77e8\") " pod="openstack/cinder-6b92-account-create-update-hzv5c"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.556644 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d791f62b-80ce-4ea9-acdc-bfb288614bac-operator-scripts\") pod \"barbican-db-create-k6ddc\" (UID: \"d791f62b-80ce-4ea9-acdc-bfb288614bac\") " pod="openstack/barbican-db-create-k6ddc"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.557679 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-operator-scripts\") pod \"cinder-6b92-account-create-update-hzv5c\" (UID: \"07bd5933-72a5-4f99-b5f9-08d8cadc77e8\") " pod="openstack/cinder-6b92-account-create-update-hzv5c"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.567737 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-6b92-account-create-update-hzv5c"]
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.584060 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2b2h\" (UniqueName: \"kubernetes.io/projected/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-kube-api-access-x2b2h\") pod \"cinder-6b92-account-create-update-hzv5c\" (UID: \"07bd5933-72a5-4f99-b5f9-08d8cadc77e8\") " pod="openstack/cinder-6b92-account-create-update-hzv5c"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.614487 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8bq4\" (UniqueName: \"kubernetes.io/projected/d791f62b-80ce-4ea9-acdc-bfb288614bac-kube-api-access-t8bq4\") pod \"barbican-db-create-k6ddc\" (UID: \"d791f62b-80ce-4ea9-acdc-bfb288614bac\") " pod="openstack/barbican-db-create-k6ddc"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.617895 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-k6ddc"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.658827 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whz7b\" (UniqueName: \"kubernetes.io/projected/09f49677-5f6f-4144-9e29-4db96e4fcb1e-kube-api-access-whz7b\") pod \"cinder-db-create-kj54h\" (UID: \"09f49677-5f6f-4144-9e29-4db96e4fcb1e\") " pod="openstack/cinder-db-create-kj54h"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.658890 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09f49677-5f6f-4144-9e29-4db96e4fcb1e-operator-scripts\") pod \"cinder-db-create-kj54h\" (UID: \"09f49677-5f6f-4144-9e29-4db96e4fcb1e\") " pod="openstack/cinder-db-create-kj54h"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.659488 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09f49677-5f6f-4144-9e29-4db96e4fcb1e-operator-scripts\") pod \"cinder-db-create-kj54h\" (UID: \"09f49677-5f6f-4144-9e29-4db96e4fcb1e\") " pod="openstack/cinder-db-create-kj54h"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.669914 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-2mr78"]
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.670993 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-2mr78"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.678265 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.679304 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-w7bjw"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.679547 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.679632 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.693803 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whz7b\" (UniqueName: \"kubernetes.io/projected/09f49677-5f6f-4144-9e29-4db96e4fcb1e-kube-api-access-whz7b\") pod \"cinder-db-create-kj54h\" (UID: \"09f49677-5f6f-4144-9e29-4db96e4fcb1e\") " pod="openstack/cinder-db-create-kj54h"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.709927 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-2mr78"]
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.763762 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-combined-ca-bundle\") pod \"keystone-db-sync-2mr78\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " pod="openstack/keystone-db-sync-2mr78"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.763883 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-config-data\") pod \"keystone-db-sync-2mr78\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " pod="openstack/keystone-db-sync-2mr78"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.763930 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqhq4\" (UniqueName: \"kubernetes.io/projected/136e68fc-176f-4240-9876-53e81cc4caab-kube-api-access-qqhq4\") pod \"keystone-db-sync-2mr78\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " pod="openstack/keystone-db-sync-2mr78"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.769203 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-kj54h"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.776327 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-6b92-account-create-update-hzv5c"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.869656 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-combined-ca-bundle\") pod \"keystone-db-sync-2mr78\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " pod="openstack/keystone-db-sync-2mr78"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.869781 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-config-data\") pod \"keystone-db-sync-2mr78\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " pod="openstack/keystone-db-sync-2mr78"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.869801 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqhq4\" (UniqueName: \"kubernetes.io/projected/136e68fc-176f-4240-9876-53e81cc4caab-kube-api-access-qqhq4\") pod \"keystone-db-sync-2mr78\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " pod="openstack/keystone-db-sync-2mr78"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.877622 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-combined-ca-bundle\") pod \"keystone-db-sync-2mr78\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " pod="openstack/keystone-db-sync-2mr78"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.885949 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-config-data\") pod \"keystone-db-sync-2mr78\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " pod="openstack/keystone-db-sync-2mr78"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.897876 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqhq4\" (UniqueName: \"kubernetes.io/projected/136e68fc-176f-4240-9876-53e81cc4caab-kube-api-access-qqhq4\") pod \"keystone-db-sync-2mr78\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " pod="openstack/keystone-db-sync-2mr78"
Nov 26 05:43:22 crc kubenswrapper[4871]: I1126 05:43:22.993635 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-a517-account-create-update-zhft2"]
Nov 26 05:43:23 crc kubenswrapper[4871]: W1126 05:43:23.006310 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4666a76_36db_4a3e_a12d_0cfb82284f7a.slice/crio-5dea1eb13c594d872c8505ea7160917245941e4ff43db4ed8d0b11cd2869322d WatchSource:0}: Error finding container 5dea1eb13c594d872c8505ea7160917245941e4ff43db4ed8d0b11cd2869322d: Status 404 returned error can't find the container with id 5dea1eb13c594d872c8505ea7160917245941e4ff43db4ed8d0b11cd2869322d
Nov 26 05:43:23 crc kubenswrapper[4871]: I1126 05:43:23.037017 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a517-account-create-update-zhft2" event={"ID":"a4666a76-36db-4a3e-a12d-0cfb82284f7a","Type":"ContainerStarted","Data":"5dea1eb13c594d872c8505ea7160917245941e4ff43db4ed8d0b11cd2869322d"}
Nov 26 05:43:23 crc kubenswrapper[4871]: I1126 05:43:23.040477 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-2mr78"
Nov 26 05:43:23 crc kubenswrapper[4871]: I1126 05:43:23.342640 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-k6ddc"]
Nov 26 05:43:23 crc kubenswrapper[4871]: I1126 05:43:23.395157 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-kj54h"]
Nov 26 05:43:23 crc kubenswrapper[4871]: I1126 05:43:23.468170 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-6b92-account-create-update-hzv5c"]
Nov 26 05:43:23 crc kubenswrapper[4871]: W1126 05:43:23.472040 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07bd5933_72a5_4f99_b5f9_08d8cadc77e8.slice/crio-47409d8b7d3ec8874dd1526f24a71e63af3542c1a7026b1e6c6185609755ff4f WatchSource:0}: Error finding container 47409d8b7d3ec8874dd1526f24a71e63af3542c1a7026b1e6c6185609755ff4f: Status 404 returned error can't find the container with id 47409d8b7d3ec8874dd1526f24a71e63af3542c1a7026b1e6c6185609755ff4f
Nov 26 05:43:23 crc kubenswrapper[4871]: I1126 05:43:23.634987 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-2mr78"]
Nov 26 05:43:23 crc kubenswrapper[4871]: W1126 05:43:23.684784 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod136e68fc_176f_4240_9876_53e81cc4caab.slice/crio-694724d091f1e88faf63eb1a004125a8c306daf1a1d16bd4d74b21a7d23a2b7e WatchSource:0}: Error finding container 694724d091f1e88faf63eb1a004125a8c306daf1a1d16bd4d74b21a7d23a2b7e: Status 404 returned error can't find the container with id 694724d091f1e88faf63eb1a004125a8c306daf1a1d16bd4d74b21a7d23a2b7e
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.048648 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-2mr78" event={"ID":"136e68fc-176f-4240-9876-53e81cc4caab","Type":"ContainerStarted","Data":"694724d091f1e88faf63eb1a004125a8c306daf1a1d16bd4d74b21a7d23a2b7e"}
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.050848 4871 generic.go:334] "Generic (PLEG): container finished" podID="09f49677-5f6f-4144-9e29-4db96e4fcb1e" containerID="e3869b59a5122fe45ea941c46900c5b12caffa842755e106eed0ba7db87cb2c0" exitCode=0
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.051230 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-kj54h" event={"ID":"09f49677-5f6f-4144-9e29-4db96e4fcb1e","Type":"ContainerDied","Data":"e3869b59a5122fe45ea941c46900c5b12caffa842755e106eed0ba7db87cb2c0"}
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.051259 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-kj54h" event={"ID":"09f49677-5f6f-4144-9e29-4db96e4fcb1e","Type":"ContainerStarted","Data":"c365af5147ed751f6ead97f104726d93b3871ca780202a6b1b90c5745b1a1590"}
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.053608 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-6b92-account-create-update-hzv5c" event={"ID":"07bd5933-72a5-4f99-b5f9-08d8cadc77e8","Type":"ContainerStarted","Data":"3cb3de7e18022b03f015c5b5076cb8bea79f1cf86da013952f62066c36a38cce"}
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.053630 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-6b92-account-create-update-hzv5c" event={"ID":"07bd5933-72a5-4f99-b5f9-08d8cadc77e8","Type":"ContainerStarted","Data":"47409d8b7d3ec8874dd1526f24a71e63af3542c1a7026b1e6c6185609755ff4f"}
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.055563 4871 generic.go:334] "Generic (PLEG): container finished" podID="d791f62b-80ce-4ea9-acdc-bfb288614bac" containerID="1e348798dd5b55775761ee4b70f95d158dab764cbc5777e9a0943efa94ba5558" exitCode=0
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.055602 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-k6ddc" event={"ID":"d791f62b-80ce-4ea9-acdc-bfb288614bac","Type":"ContainerDied","Data":"1e348798dd5b55775761ee4b70f95d158dab764cbc5777e9a0943efa94ba5558"}
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.055617 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-k6ddc" event={"ID":"d791f62b-80ce-4ea9-acdc-bfb288614bac","Type":"ContainerStarted","Data":"6fd2f7465e9fd50079d87cfd05aedc47dc9e97869e3c5479848cc709a1d72db6"}
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.056683 4871 generic.go:334] "Generic (PLEG): container finished" podID="a4666a76-36db-4a3e-a12d-0cfb82284f7a" containerID="1f54bb2588599460bb6ce0b0e2f42fabe04e0ac3b77d40b2fd98f9f5b76dd4cf" exitCode=0
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.056709 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a517-account-create-update-zhft2" event={"ID":"a4666a76-36db-4a3e-a12d-0cfb82284f7a","Type":"ContainerDied","Data":"1f54bb2588599460bb6ce0b0e2f42fabe04e0ac3b77d40b2fd98f9f5b76dd4cf"}
Nov 26 05:43:24 crc kubenswrapper[4871]: I1126 05:43:24.081054 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-6b92-account-create-update-hzv5c" podStartSLOduration=2.08103605 podStartE2EDuration="2.08103605s" podCreationTimestamp="2025-11-26 05:43:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:43:24.075683107 +0000 UTC m=+1062.258734733" watchObservedRunningTime="2025-11-26 05:43:24.08103605 +0000 UTC m=+1062.264087626"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.067312 4871 generic.go:334] "Generic (PLEG): container finished" podID="07bd5933-72a5-4f99-b5f9-08d8cadc77e8" containerID="3cb3de7e18022b03f015c5b5076cb8bea79f1cf86da013952f62066c36a38cce" exitCode=0
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.069901 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-6b92-account-create-update-hzv5c" event={"ID":"07bd5933-72a5-4f99-b5f9-08d8cadc77e8","Type":"ContainerDied","Data":"3cb3de7e18022b03f015c5b5076cb8bea79f1cf86da013952f62066c36a38cce"}
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.079762 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-mw8tv"]
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.086718 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-mw8tv"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.109315 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-mw8tv"]
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.138169 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-sync-gggp7"]
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.139206 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-gggp7"]
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.139277 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.152074 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-mph7b"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.152311 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-config-data"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.206916 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81b8252a-3d5b-4d62-9d38-c5e696bbe613-operator-scripts\") pod \"glance-db-create-mw8tv\" (UID: \"81b8252a-3d5b-4d62-9d38-c5e696bbe613\") " pod="openstack/glance-db-create-mw8tv"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.206980 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj49z\" (UniqueName: \"kubernetes.io/projected/81b8252a-3d5b-4d62-9d38-c5e696bbe613-kube-api-access-pj49z\") pod \"glance-db-create-mw8tv\" (UID: \"81b8252a-3d5b-4d62-9d38-c5e696bbe613\") " pod="openstack/glance-db-create-mw8tv"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.297133 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-7821-account-create-update-f5m7h"]
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.298727 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-7821-account-create-update-f5m7h"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.301344 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.309673 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkhhz\" (UniqueName: \"kubernetes.io/projected/1e893829-69cb-4a4f-9b97-5b96332e5724-kube-api-access-vkhhz\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.309789 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-db-sync-config-data\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.309845 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81b8252a-3d5b-4d62-9d38-c5e696bbe613-operator-scripts\") pod \"glance-db-create-mw8tv\" (UID: \"81b8252a-3d5b-4d62-9d38-c5e696bbe613\") " pod="openstack/glance-db-create-mw8tv"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.309867 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-config-data\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.309896 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-combined-ca-bundle\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.309923 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pj49z\" (UniqueName: \"kubernetes.io/projected/81b8252a-3d5b-4d62-9d38-c5e696bbe613-kube-api-access-pj49z\") pod \"glance-db-create-mw8tv\" (UID: \"81b8252a-3d5b-4d62-9d38-c5e696bbe613\") " pod="openstack/glance-db-create-mw8tv"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.310619 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81b8252a-3d5b-4d62-9d38-c5e696bbe613-operator-scripts\") pod \"glance-db-create-mw8tv\" (UID: \"81b8252a-3d5b-4d62-9d38-c5e696bbe613\") " pod="openstack/glance-db-create-mw8tv"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.311973 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-7821-account-create-update-f5m7h"]
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.333801 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pj49z\" (UniqueName: \"kubernetes.io/projected/81b8252a-3d5b-4d62-9d38-c5e696bbe613-kube-api-access-pj49z\") pod \"glance-db-create-mw8tv\" (UID: \"81b8252a-3d5b-4d62-9d38-c5e696bbe613\") " pod="openstack/glance-db-create-mw8tv"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.380200 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-hljkf"]
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.381311 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-hljkf"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.394677 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-hljkf"]
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.411564 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-db-sync-config-data\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.411779 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxhp2\" (UniqueName: \"kubernetes.io/projected/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-kube-api-access-bxhp2\") pod \"glance-7821-account-create-update-f5m7h\" (UID: \"a9ce6687-ccf1-40e7-b1f8-b42502d5a149\") " pod="openstack/glance-7821-account-create-update-f5m7h"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.411880 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-config-data\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.411965 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-combined-ca-bundle\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.412069 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkhhz\" (UniqueName: \"kubernetes.io/projected/1e893829-69cb-4a4f-9b97-5b96332e5724-kube-api-access-vkhhz\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.412171 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-operator-scripts\") pod \"glance-7821-account-create-update-f5m7h\" (UID: \"a9ce6687-ccf1-40e7-b1f8-b42502d5a149\") " pod="openstack/glance-7821-account-create-update-f5m7h"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.427883 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-config-data\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.428823 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-db-sync-config-data\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.429825 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-combined-ca-bundle\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.434740 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkhhz\" (UniqueName: \"kubernetes.io/projected/1e893829-69cb-4a4f-9b97-5b96332e5724-kube-api-access-vkhhz\") pod \"watcher-db-sync-gggp7\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.461409 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-mw8tv"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.472697 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-gggp7"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.498961 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-ac3b-account-create-update-dt6ts"]
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.504258 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ac3b-account-create-update-dt6ts"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.517070 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxhp2\" (UniqueName: \"kubernetes.io/projected/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-kube-api-access-bxhp2\") pod \"glance-7821-account-create-update-f5m7h\" (UID: \"a9ce6687-ccf1-40e7-b1f8-b42502d5a149\") " pod="openstack/glance-7821-account-create-update-f5m7h"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.517247 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25db4f6b-d9ba-4147-8f75-8283a144bc17-operator-scripts\") pod \"neutron-db-create-hljkf\" (UID: \"25db4f6b-d9ba-4147-8f75-8283a144bc17\") " pod="openstack/neutron-db-create-hljkf"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.517295 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6rx9\" (UniqueName: \"kubernetes.io/projected/25db4f6b-d9ba-4147-8f75-8283a144bc17-kube-api-access-k6rx9\") pod \"neutron-db-create-hljkf\" (UID: \"25db4f6b-d9ba-4147-8f75-8283a144bc17\") " pod="openstack/neutron-db-create-hljkf"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.517319 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-operator-scripts\") pod \"glance-7821-account-create-update-f5m7h\" (UID: \"a9ce6687-ccf1-40e7-b1f8-b42502d5a149\") " pod="openstack/glance-7821-account-create-update-f5m7h"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.527949 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-operator-scripts\") pod \"glance-7821-account-create-update-f5m7h\" (UID: \"a9ce6687-ccf1-40e7-b1f8-b42502d5a149\") " pod="openstack/glance-7821-account-create-update-f5m7h"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.552141 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.556618 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxhp2\" (UniqueName: \"kubernetes.io/projected/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-kube-api-access-bxhp2\") pod \"glance-7821-account-create-update-f5m7h\" (UID: \"a9ce6687-ccf1-40e7-b1f8-b42502d5a149\") " pod="openstack/glance-7821-account-create-update-f5m7h"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.574663 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ac3b-account-create-update-dt6ts"]
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.589770 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-k6ddc"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.618584 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0601d4ca-135d-4d81-87cf-13e178ed9660-operator-scripts\") pod \"neutron-ac3b-account-create-update-dt6ts\" (UID: \"0601d4ca-135d-4d81-87cf-13e178ed9660\") " pod="openstack/neutron-ac3b-account-create-update-dt6ts"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.618866 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvjlw\" (UniqueName: \"kubernetes.io/projected/0601d4ca-135d-4d81-87cf-13e178ed9660-kube-api-access-gvjlw\") pod \"neutron-ac3b-account-create-update-dt6ts\" (UID: \"0601d4ca-135d-4d81-87cf-13e178ed9660\") " pod="openstack/neutron-ac3b-account-create-update-dt6ts"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.618921 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25db4f6b-d9ba-4147-8f75-8283a144bc17-operator-scripts\") pod \"neutron-db-create-hljkf\" (UID: \"25db4f6b-d9ba-4147-8f75-8283a144bc17\") " pod="openstack/neutron-db-create-hljkf"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.618960 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6rx9\" (UniqueName: \"kubernetes.io/projected/25db4f6b-d9ba-4147-8f75-8283a144bc17-kube-api-access-k6rx9\") pod \"neutron-db-create-hljkf\" (UID: \"25db4f6b-d9ba-4147-8f75-8283a144bc17\") " pod="openstack/neutron-db-create-hljkf"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.619725 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25db4f6b-d9ba-4147-8f75-8283a144bc17-operator-scripts\") pod \"neutron-db-create-hljkf\" (UID: \"25db4f6b-d9ba-4147-8f75-8283a144bc17\") " pod="openstack/neutron-db-create-hljkf"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.621221 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-7821-account-create-update-f5m7h"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.627921 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a517-account-create-update-zhft2"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.640150 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6rx9\" (UniqueName: \"kubernetes.io/projected/25db4f6b-d9ba-4147-8f75-8283a144bc17-kube-api-access-k6rx9\") pod \"neutron-db-create-hljkf\" (UID: \"25db4f6b-d9ba-4147-8f75-8283a144bc17\") " pod="openstack/neutron-db-create-hljkf"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.641157 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-kj54h"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.722062 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmj87\" (UniqueName: \"kubernetes.io/projected/a4666a76-36db-4a3e-a12d-0cfb82284f7a-kube-api-access-mmj87\") pod \"a4666a76-36db-4a3e-a12d-0cfb82284f7a\" (UID: \"a4666a76-36db-4a3e-a12d-0cfb82284f7a\") "
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.722198 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d791f62b-80ce-4ea9-acdc-bfb288614bac-operator-scripts\") pod \"d791f62b-80ce-4ea9-acdc-bfb288614bac\" (UID: \"d791f62b-80ce-4ea9-acdc-bfb288614bac\") "
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.722248 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whz7b\" (UniqueName: \"kubernetes.io/projected/09f49677-5f6f-4144-9e29-4db96e4fcb1e-kube-api-access-whz7b\") pod \"09f49677-5f6f-4144-9e29-4db96e4fcb1e\" (UID: \"09f49677-5f6f-4144-9e29-4db96e4fcb1e\") "
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.722340 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8bq4\" (UniqueName: \"kubernetes.io/projected/d791f62b-80ce-4ea9-acdc-bfb288614bac-kube-api-access-t8bq4\") pod \"d791f62b-80ce-4ea9-acdc-bfb288614bac\" (UID: \"d791f62b-80ce-4ea9-acdc-bfb288614bac\") "
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.722411 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09f49677-5f6f-4144-9e29-4db96e4fcb1e-operator-scripts\") pod \"09f49677-5f6f-4144-9e29-4db96e4fcb1e\" (UID: \"09f49677-5f6f-4144-9e29-4db96e4fcb1e\") "
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.722466 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4666a76-36db-4a3e-a12d-0cfb82284f7a-operator-scripts\") pod \"a4666a76-36db-4a3e-a12d-0cfb82284f7a\" (UID: \"a4666a76-36db-4a3e-a12d-0cfb82284f7a\") "
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.722846 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0601d4ca-135d-4d81-87cf-13e178ed9660-operator-scripts\") pod \"neutron-ac3b-account-create-update-dt6ts\" (UID: \"0601d4ca-135d-4d81-87cf-13e178ed9660\") " pod="openstack/neutron-ac3b-account-create-update-dt6ts"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.722914 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvjlw\" (UniqueName: \"kubernetes.io/projected/0601d4ca-135d-4d81-87cf-13e178ed9660-kube-api-access-gvjlw\") pod \"neutron-ac3b-account-create-update-dt6ts\" (UID: \"0601d4ca-135d-4d81-87cf-13e178ed9660\") " pod="openstack/neutron-ac3b-account-create-update-dt6ts"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.722998 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09f49677-5f6f-4144-9e29-4db96e4fcb1e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "09f49677-5f6f-4144-9e29-4db96e4fcb1e" (UID: "09f49677-5f6f-4144-9e29-4db96e4fcb1e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.723002 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d791f62b-80ce-4ea9-acdc-bfb288614bac-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d791f62b-80ce-4ea9-acdc-bfb288614bac" (UID: "d791f62b-80ce-4ea9-acdc-bfb288614bac"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.723345 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4666a76-36db-4a3e-a12d-0cfb82284f7a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a4666a76-36db-4a3e-a12d-0cfb82284f7a" (UID: "a4666a76-36db-4a3e-a12d-0cfb82284f7a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.724234 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0601d4ca-135d-4d81-87cf-13e178ed9660-operator-scripts\") pod \"neutron-ac3b-account-create-update-dt6ts\" (UID: \"0601d4ca-135d-4d81-87cf-13e178ed9660\") " pod="openstack/neutron-ac3b-account-create-update-dt6ts"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.726812 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4666a76-36db-4a3e-a12d-0cfb82284f7a-kube-api-access-mmj87" (OuterVolumeSpecName: "kube-api-access-mmj87") pod "a4666a76-36db-4a3e-a12d-0cfb82284f7a" (UID: "a4666a76-36db-4a3e-a12d-0cfb82284f7a"). InnerVolumeSpecName "kube-api-access-mmj87". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.738820 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d791f62b-80ce-4ea9-acdc-bfb288614bac-kube-api-access-t8bq4" (OuterVolumeSpecName: "kube-api-access-t8bq4") pod "d791f62b-80ce-4ea9-acdc-bfb288614bac" (UID: "d791f62b-80ce-4ea9-acdc-bfb288614bac"). InnerVolumeSpecName "kube-api-access-t8bq4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.739139 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09f49677-5f6f-4144-9e29-4db96e4fcb1e-kube-api-access-whz7b" (OuterVolumeSpecName: "kube-api-access-whz7b") pod "09f49677-5f6f-4144-9e29-4db96e4fcb1e" (UID: "09f49677-5f6f-4144-9e29-4db96e4fcb1e"). InnerVolumeSpecName "kube-api-access-whz7b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.743101 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvjlw\" (UniqueName: \"kubernetes.io/projected/0601d4ca-135d-4d81-87cf-13e178ed9660-kube-api-access-gvjlw\") pod \"neutron-ac3b-account-create-update-dt6ts\" (UID: \"0601d4ca-135d-4d81-87cf-13e178ed9660\") " pod="openstack/neutron-ac3b-account-create-update-dt6ts"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.825128 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4666a76-36db-4a3e-a12d-0cfb82284f7a-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.825160 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmj87\" (UniqueName: \"kubernetes.io/projected/a4666a76-36db-4a3e-a12d-0cfb82284f7a-kube-api-access-mmj87\") on node \"crc\" DevicePath \"\""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.825174 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d791f62b-80ce-4ea9-acdc-bfb288614bac-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.825182 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whz7b\" (UniqueName: \"kubernetes.io/projected/09f49677-5f6f-4144-9e29-4db96e4fcb1e-kube-api-access-whz7b\") on node \"crc\" DevicePath \"\""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.825209 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8bq4\" (UniqueName: \"kubernetes.io/projected/d791f62b-80ce-4ea9-acdc-bfb288614bac-kube-api-access-t8bq4\") on node \"crc\" DevicePath \"\""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.825393 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/09f49677-5f6f-4144-9e29-4db96e4fcb1e-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.886517 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-hljkf"
Nov 26 05:43:25 crc kubenswrapper[4871]: I1126 05:43:25.918684 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ac3b-account-create-update-dt6ts"
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.018139 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-mw8tv"]
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.042801 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-gggp7"]
Nov 26 05:43:26 crc kubenswrapper[4871]: W1126 05:43:26.046991 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81b8252a_3d5b_4d62_9d38_c5e696bbe613.slice/crio-5cf2e8614765f90a024a1838d288a2bd64e01fb69e85ab99bc7e58cd32d29ce0 WatchSource:0}: Error finding container 5cf2e8614765f90a024a1838d288a2bd64e01fb69e85ab99bc7e58cd32d29ce0: Status 404 returned error can't find the container with id 5cf2e8614765f90a024a1838d288a2bd64e01fb69e85ab99bc7e58cd32d29ce0
Nov 26 05:43:26 crc kubenswrapper[4871]: W1126 05:43:26.056539 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e893829_69cb_4a4f_9b97_5b96332e5724.slice/crio-f9498ec4a15a27cec60ac4b2814eb1d57fccde1e3aad2ea1eb68555982f813de WatchSource:0}: Error finding container f9498ec4a15a27cec60ac4b2814eb1d57fccde1e3aad2ea1eb68555982f813de: Status 404 returned error can't find the container with id f9498ec4a15a27cec60ac4b2814eb1d57fccde1e3aad2ea1eb68555982f813de
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.102150 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-k6ddc"
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.102358 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-k6ddc" event={"ID":"d791f62b-80ce-4ea9-acdc-bfb288614bac","Type":"ContainerDied","Data":"6fd2f7465e9fd50079d87cfd05aedc47dc9e97869e3c5479848cc709a1d72db6"}
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.103206 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6fd2f7465e9fd50079d87cfd05aedc47dc9e97869e3c5479848cc709a1d72db6"
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.104162 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-mw8tv" event={"ID":"81b8252a-3d5b-4d62-9d38-c5e696bbe613","Type":"ContainerStarted","Data":"5cf2e8614765f90a024a1838d288a2bd64e01fb69e85ab99bc7e58cd32d29ce0"}
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.106184 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a517-account-create-update-zhft2" event={"ID":"a4666a76-36db-4a3e-a12d-0cfb82284f7a","Type":"ContainerDied","Data":"5dea1eb13c594d872c8505ea7160917245941e4ff43db4ed8d0b11cd2869322d"}
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.106214 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5dea1eb13c594d872c8505ea7160917245941e4ff43db4ed8d0b11cd2869322d"
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.106279 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a517-account-create-update-zhft2"
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.115959 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-kj54h"
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.116197 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-kj54h" event={"ID":"09f49677-5f6f-4144-9e29-4db96e4fcb1e","Type":"ContainerDied","Data":"c365af5147ed751f6ead97f104726d93b3871ca780202a6b1b90c5745b1a1590"}
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.116239 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c365af5147ed751f6ead97f104726d93b3871ca780202a6b1b90c5745b1a1590"
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.157311 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-7821-account-create-update-f5m7h"]
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.376028 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ac3b-account-create-update-dt6ts"]
Nov 26 05:43:26 crc kubenswrapper[4871]: W1126 05:43:26.401696 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0601d4ca_135d_4d81_87cf_13e178ed9660.slice/crio-d6b56530783e3c29188c3d98f9c33c33f1b7542ee8433f29683c1411356b700c WatchSource:0}: Error finding container d6b56530783e3c29188c3d98f9c33c33f1b7542ee8433f29683c1411356b700c: Status 404 returned error can't find the container with id d6b56530783e3c29188c3d98f9c33c33f1b7542ee8433f29683c1411356b700c
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.430473 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-hljkf"]
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.786971 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-6b92-account-create-update-hzv5c"
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.951488 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-operator-scripts\") pod \"07bd5933-72a5-4f99-b5f9-08d8cadc77e8\" (UID: \"07bd5933-72a5-4f99-b5f9-08d8cadc77e8\") "
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.951666 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2b2h\" (UniqueName: \"kubernetes.io/projected/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-kube-api-access-x2b2h\") pod \"07bd5933-72a5-4f99-b5f9-08d8cadc77e8\" (UID: \"07bd5933-72a5-4f99-b5f9-08d8cadc77e8\") "
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.954136 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "07bd5933-72a5-4f99-b5f9-08d8cadc77e8" (UID: "07bd5933-72a5-4f99-b5f9-08d8cadc77e8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:43:26 crc kubenswrapper[4871]: I1126 05:43:26.961359 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-kube-api-access-x2b2h" (OuterVolumeSpecName: "kube-api-access-x2b2h") pod "07bd5933-72a5-4f99-b5f9-08d8cadc77e8" (UID: "07bd5933-72a5-4f99-b5f9-08d8cadc77e8"). InnerVolumeSpecName "kube-api-access-x2b2h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.053974 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.054002 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2b2h\" (UniqueName: \"kubernetes.io/projected/07bd5933-72a5-4f99-b5f9-08d8cadc77e8-kube-api-access-x2b2h\") on node \"crc\" DevicePath \"\""
Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.128328 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ac3b-account-create-update-dt6ts" event={"ID":"0601d4ca-135d-4d81-87cf-13e178ed9660","Type":"ContainerStarted","Data":"17b089a238476abee8212d46ffff81ee90b084d8f9639eac7cb698c4d51e9c5a"}
Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.128686 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ac3b-account-create-update-dt6ts" event={"ID":"0601d4ca-135d-4d81-87cf-13e178ed9660","Type":"ContainerStarted","Data":"d6b56530783e3c29188c3d98f9c33c33f1b7542ee8433f29683c1411356b700c"}
Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.132891 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-6b92-account-create-update-hzv5c" event={"ID":"07bd5933-72a5-4f99-b5f9-08d8cadc77e8","Type":"ContainerDied","Data":"47409d8b7d3ec8874dd1526f24a71e63af3542c1a7026b1e6c6185609755ff4f"}
Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.132916 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47409d8b7d3ec8874dd1526f24a71e63af3542c1a7026b1e6c6185609755ff4f"
Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.132960 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-6b92-account-create-update-hzv5c" Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.145154 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-ac3b-account-create-update-dt6ts" podStartSLOduration=2.145134662 podStartE2EDuration="2.145134662s" podCreationTimestamp="2025-11-26 05:43:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:43:27.139832101 +0000 UTC m=+1065.322883687" watchObservedRunningTime="2025-11-26 05:43:27.145134662 +0000 UTC m=+1065.328186248" Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.146745 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-gggp7" event={"ID":"1e893829-69cb-4a4f-9b97-5b96332e5724","Type":"ContainerStarted","Data":"f9498ec4a15a27cec60ac4b2814eb1d57fccde1e3aad2ea1eb68555982f813de"} Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.148194 4871 generic.go:334] "Generic (PLEG): container finished" podID="81b8252a-3d5b-4d62-9d38-c5e696bbe613" containerID="33fd2a391d54152eaf8b7e3b1456de56fe793bd89ee5268bbef8736bf39c15d6" exitCode=0 Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.148250 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-mw8tv" event={"ID":"81b8252a-3d5b-4d62-9d38-c5e696bbe613","Type":"ContainerDied","Data":"33fd2a391d54152eaf8b7e3b1456de56fe793bd89ee5268bbef8736bf39c15d6"} Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.154933 4871 generic.go:334] "Generic (PLEG): container finished" podID="25db4f6b-d9ba-4147-8f75-8283a144bc17" containerID="af3229e3f6d0be7e177777eb582543ac0a1185b964b940f73e9f713551687602" exitCode=0 Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.155089 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-hljkf" event={"ID":"25db4f6b-d9ba-4147-8f75-8283a144bc17","Type":"ContainerDied","Data":"af3229e3f6d0be7e177777eb582543ac0a1185b964b940f73e9f713551687602"} Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.155117 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-hljkf" event={"ID":"25db4f6b-d9ba-4147-8f75-8283a144bc17","Type":"ContainerStarted","Data":"17c8cee6cd728937437a2836de83648fb53ddf87ca89f8d5e99e7ae88b50ad68"} Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.168101 4871 generic.go:334] "Generic (PLEG): container finished" podID="a9ce6687-ccf1-40e7-b1f8-b42502d5a149" containerID="7242948d12d8d24c3c33d7ef9950db597acaf4fa0d6bd5680c5d296e0da528f0" exitCode=0 Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.168147 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-7821-account-create-update-f5m7h" event={"ID":"a9ce6687-ccf1-40e7-b1f8-b42502d5a149","Type":"ContainerDied","Data":"7242948d12d8d24c3c33d7ef9950db597acaf4fa0d6bd5680c5d296e0da528f0"} Nov 26 05:43:27 crc kubenswrapper[4871]: I1126 05:43:27.168174 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-7821-account-create-update-f5m7h" event={"ID":"a9ce6687-ccf1-40e7-b1f8-b42502d5a149","Type":"ContainerStarted","Data":"edf5f17543db3fc76e96237f05f803627faec6ebbfd80d37e3758e951e84ccbd"} Nov 26 05:43:28 crc kubenswrapper[4871]: I1126 05:43:28.179877 4871 generic.go:334] "Generic (PLEG): container finished" podID="0601d4ca-135d-4d81-87cf-13e178ed9660" 
containerID="17b089a238476abee8212d46ffff81ee90b084d8f9639eac7cb698c4d51e9c5a" exitCode=0 Nov 26 05:43:28 crc kubenswrapper[4871]: I1126 05:43:28.179993 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ac3b-account-create-update-dt6ts" event={"ID":"0601d4ca-135d-4d81-87cf-13e178ed9660","Type":"ContainerDied","Data":"17b089a238476abee8212d46ffff81ee90b084d8f9639eac7cb698c4d51e9c5a"} Nov 26 05:43:28 crc kubenswrapper[4871]: I1126 05:43:28.634750 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" Nov 26 05:43:28 crc kubenswrapper[4871]: I1126 05:43:28.693585 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76f9c4c8bc-56mq5"] Nov 26 05:43:28 crc kubenswrapper[4871]: I1126 05:43:28.693780 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" podUID="468d6a43-d467-4606-b7fd-a39e765a72e1" containerName="dnsmasq-dns" containerID="cri-o://3f21a7f8d9278353eaf1d8f49da2f1913305236e379a2e359f7aa9112776bc88" gracePeriod=10 Nov 26 05:43:28 crc kubenswrapper[4871]: E1126 05:43:28.959074 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod468d6a43_d467_4606_b7fd_a39e765a72e1.slice/crio-3f21a7f8d9278353eaf1d8f49da2f1913305236e379a2e359f7aa9112776bc88.scope\": RecentStats: unable to find data in memory cache]" Nov 26 05:43:29 crc kubenswrapper[4871]: I1126 05:43:29.195052 4871 generic.go:334] "Generic (PLEG): container finished" podID="468d6a43-d467-4606-b7fd-a39e765a72e1" containerID="3f21a7f8d9278353eaf1d8f49da2f1913305236e379a2e359f7aa9112776bc88" exitCode=0 Nov 26 05:43:29 crc kubenswrapper[4871]: I1126 05:43:29.195224 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" event={"ID":"468d6a43-d467-4606-b7fd-a39e765a72e1","Type":"ContainerDied","Data":"3f21a7f8d9278353eaf1d8f49da2f1913305236e379a2e359f7aa9112776bc88"} Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.075039 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-7821-account-create-update-f5m7h" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.086205 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-mw8tv" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.139858 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-hljkf" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.163859 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ac3b-account-create-update-dt6ts" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.171762 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.205127 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-7821-account-create-update-f5m7h" event={"ID":"a9ce6687-ccf1-40e7-b1f8-b42502d5a149","Type":"ContainerDied","Data":"edf5f17543db3fc76e96237f05f803627faec6ebbfd80d37e3758e951e84ccbd"} Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.205164 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="edf5f17543db3fc76e96237f05f803627faec6ebbfd80d37e3758e951e84ccbd" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.205212 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-7821-account-create-update-f5m7h" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.208774 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" event={"ID":"468d6a43-d467-4606-b7fd-a39e765a72e1","Type":"ContainerDied","Data":"c0ee53108ef1efee48b0bfee0b628bb492b2f4bb91c3f56fb02e1e9617ef9346"} Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.208819 4871 scope.go:117] "RemoveContainer" containerID="3f21a7f8d9278353eaf1d8f49da2f1913305236e379a2e359f7aa9112776bc88" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.209233 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76f9c4c8bc-56mq5" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.215009 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ac3b-account-create-update-dt6ts" event={"ID":"0601d4ca-135d-4d81-87cf-13e178ed9660","Type":"ContainerDied","Data":"d6b56530783e3c29188c3d98f9c33c33f1b7542ee8433f29683c1411356b700c"} Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.215041 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d6b56530783e3c29188c3d98f9c33c33f1b7542ee8433f29683c1411356b700c" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.215134 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-ac3b-account-create-update-dt6ts" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.217435 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-mw8tv" event={"ID":"81b8252a-3d5b-4d62-9d38-c5e696bbe613","Type":"ContainerDied","Data":"5cf2e8614765f90a024a1838d288a2bd64e01fb69e85ab99bc7e58cd32d29ce0"} Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.217468 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5cf2e8614765f90a024a1838d288a2bd64e01fb69e85ab99bc7e58cd32d29ce0" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.217612 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-mw8tv" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.218301 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81b8252a-3d5b-4d62-9d38-c5e696bbe613-operator-scripts\") pod \"81b8252a-3d5b-4d62-9d38-c5e696bbe613\" (UID: \"81b8252a-3d5b-4d62-9d38-c5e696bbe613\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.218336 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxhp2\" (UniqueName: \"kubernetes.io/projected/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-kube-api-access-bxhp2\") pod \"a9ce6687-ccf1-40e7-b1f8-b42502d5a149\" (UID: \"a9ce6687-ccf1-40e7-b1f8-b42502d5a149\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.218376 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25db4f6b-d9ba-4147-8f75-8283a144bc17-operator-scripts\") pod \"25db4f6b-d9ba-4147-8f75-8283a144bc17\" (UID: \"25db4f6b-d9ba-4147-8f75-8283a144bc17\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.218421 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6rx9\" (UniqueName: \"kubernetes.io/projected/25db4f6b-d9ba-4147-8f75-8283a144bc17-kube-api-access-k6rx9\") pod \"25db4f6b-d9ba-4147-8f75-8283a144bc17\" (UID: \"25db4f6b-d9ba-4147-8f75-8283a144bc17\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.218485 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj49z\" (UniqueName: \"kubernetes.io/projected/81b8252a-3d5b-4d62-9d38-c5e696bbe613-kube-api-access-pj49z\") pod \"81b8252a-3d5b-4d62-9d38-c5e696bbe613\" (UID: \"81b8252a-3d5b-4d62-9d38-c5e696bbe613\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.218515 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-operator-scripts\") pod \"a9ce6687-ccf1-40e7-b1f8-b42502d5a149\" (UID: \"a9ce6687-ccf1-40e7-b1f8-b42502d5a149\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.219739 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a9ce6687-ccf1-40e7-b1f8-b42502d5a149" (UID: "a9ce6687-ccf1-40e7-b1f8-b42502d5a149"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.219749 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25db4f6b-d9ba-4147-8f75-8283a144bc17-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "25db4f6b-d9ba-4147-8f75-8283a144bc17" (UID: "25db4f6b-d9ba-4147-8f75-8283a144bc17"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.219747 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81b8252a-3d5b-4d62-9d38-c5e696bbe613-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "81b8252a-3d5b-4d62-9d38-c5e696bbe613" (UID: "81b8252a-3d5b-4d62-9d38-c5e696bbe613"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.221159 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-hljkf" event={"ID":"25db4f6b-d9ba-4147-8f75-8283a144bc17","Type":"ContainerDied","Data":"17c8cee6cd728937437a2836de83648fb53ddf87ca89f8d5e99e7ae88b50ad68"} Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.221211 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="17c8cee6cd728937437a2836de83648fb53ddf87ca89f8d5e99e7ae88b50ad68" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.221256 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-hljkf" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.224194 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-kube-api-access-bxhp2" (OuterVolumeSpecName: "kube-api-access-bxhp2") pod "a9ce6687-ccf1-40e7-b1f8-b42502d5a149" (UID: "a9ce6687-ccf1-40e7-b1f8-b42502d5a149"). InnerVolumeSpecName "kube-api-access-bxhp2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.225769 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81b8252a-3d5b-4d62-9d38-c5e696bbe613-kube-api-access-pj49z" (OuterVolumeSpecName: "kube-api-access-pj49z") pod "81b8252a-3d5b-4d62-9d38-c5e696bbe613" (UID: "81b8252a-3d5b-4d62-9d38-c5e696bbe613"). InnerVolumeSpecName "kube-api-access-pj49z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.225861 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25db4f6b-d9ba-4147-8f75-8283a144bc17-kube-api-access-k6rx9" (OuterVolumeSpecName: "kube-api-access-k6rx9") pod "25db4f6b-d9ba-4147-8f75-8283a144bc17" (UID: "25db4f6b-d9ba-4147-8f75-8283a144bc17"). InnerVolumeSpecName "kube-api-access-k6rx9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.236441 4871 scope.go:117] "RemoveContainer" containerID="6243b584b51b95380c8f8cc43682f689e5b3c304b5512dd3b508b0509d7265bd" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.319788 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5z2z\" (UniqueName: \"kubernetes.io/projected/468d6a43-d467-4606-b7fd-a39e765a72e1-kube-api-access-x5z2z\") pod \"468d6a43-d467-4606-b7fd-a39e765a72e1\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.319881 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-config\") pod \"468d6a43-d467-4606-b7fd-a39e765a72e1\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.319903 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0601d4ca-135d-4d81-87cf-13e178ed9660-operator-scripts\") pod \"0601d4ca-135d-4d81-87cf-13e178ed9660\" (UID: \"0601d4ca-135d-4d81-87cf-13e178ed9660\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.319958 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-nb\") pod \"468d6a43-d467-4606-b7fd-a39e765a72e1\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.319985 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-sb\") pod \"468d6a43-d467-4606-b7fd-a39e765a72e1\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.320090 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvjlw\" (UniqueName: \"kubernetes.io/projected/0601d4ca-135d-4d81-87cf-13e178ed9660-kube-api-access-gvjlw\") pod \"0601d4ca-135d-4d81-87cf-13e178ed9660\" (UID: \"0601d4ca-135d-4d81-87cf-13e178ed9660\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.320138 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-dns-svc\") pod \"468d6a43-d467-4606-b7fd-a39e765a72e1\" (UID: \"468d6a43-d467-4606-b7fd-a39e765a72e1\") " Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.320507 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81b8252a-3d5b-4d62-9d38-c5e696bbe613-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.320538 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxhp2\" (UniqueName: \"kubernetes.io/projected/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-kube-api-access-bxhp2\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.320551 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25db4f6b-d9ba-4147-8f75-8283a144bc17-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 
05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.320559 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6rx9\" (UniqueName: \"kubernetes.io/projected/25db4f6b-d9ba-4147-8f75-8283a144bc17-kube-api-access-k6rx9\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.320567 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj49z\" (UniqueName: \"kubernetes.io/projected/81b8252a-3d5b-4d62-9d38-c5e696bbe613-kube-api-access-pj49z\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.320575 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9ce6687-ccf1-40e7-b1f8-b42502d5a149-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.320727 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0601d4ca-135d-4d81-87cf-13e178ed9660-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0601d4ca-135d-4d81-87cf-13e178ed9660" (UID: "0601d4ca-135d-4d81-87cf-13e178ed9660"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.324677 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/468d6a43-d467-4606-b7fd-a39e765a72e1-kube-api-access-x5z2z" (OuterVolumeSpecName: "kube-api-access-x5z2z") pod "468d6a43-d467-4606-b7fd-a39e765a72e1" (UID: "468d6a43-d467-4606-b7fd-a39e765a72e1"). InnerVolumeSpecName "kube-api-access-x5z2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.334824 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0601d4ca-135d-4d81-87cf-13e178ed9660-kube-api-access-gvjlw" (OuterVolumeSpecName: "kube-api-access-gvjlw") pod "0601d4ca-135d-4d81-87cf-13e178ed9660" (UID: "0601d4ca-135d-4d81-87cf-13e178ed9660"). InnerVolumeSpecName "kube-api-access-gvjlw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.369016 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "468d6a43-d467-4606-b7fd-a39e765a72e1" (UID: "468d6a43-d467-4606-b7fd-a39e765a72e1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.371030 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "468d6a43-d467-4606-b7fd-a39e765a72e1" (UID: "468d6a43-d467-4606-b7fd-a39e765a72e1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.376925 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-config" (OuterVolumeSpecName: "config") pod "468d6a43-d467-4606-b7fd-a39e765a72e1" (UID: "468d6a43-d467-4606-b7fd-a39e765a72e1"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.381928 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "468d6a43-d467-4606-b7fd-a39e765a72e1" (UID: "468d6a43-d467-4606-b7fd-a39e765a72e1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.422048 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.422085 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0601d4ca-135d-4d81-87cf-13e178ed9660-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.422099 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.422111 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.422125 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvjlw\" (UniqueName: \"kubernetes.io/projected/0601d4ca-135d-4d81-87cf-13e178ed9660-kube-api-access-gvjlw\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.422137 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/468d6a43-d467-4606-b7fd-a39e765a72e1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.422149 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5z2z\" (UniqueName: \"kubernetes.io/projected/468d6a43-d467-4606-b7fd-a39e765a72e1-kube-api-access-x5z2z\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.599888 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76f9c4c8bc-56mq5"] Nov 26 05:43:30 crc kubenswrapper[4871]: I1126 05:43:30.606839 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76f9c4c8bc-56mq5"] Nov 26 05:43:31 crc kubenswrapper[4871]: I1126 05:43:31.229771 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-2mr78" event={"ID":"136e68fc-176f-4240-9876-53e81cc4caab","Type":"ContainerStarted","Data":"03c2a2301d01ead888510ff8d480fc972954ab66ec9f544fc5751c2dc780b749"} Nov 26 05:43:31 crc kubenswrapper[4871]: I1126 05:43:31.252199 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-2mr78" podStartSLOduration=2.981603931 podStartE2EDuration="9.252175687s" podCreationTimestamp="2025-11-26 05:43:22 +0000 UTC" firstStartedPulling="2025-11-26 05:43:23.690599141 +0000 UTC m=+1061.873650717" lastFinishedPulling="2025-11-26 05:43:29.961170887 +0000 UTC m=+1068.144222473" observedRunningTime="2025-11-26 05:43:31.250010894 +0000 UTC m=+1069.433062510" 
watchObservedRunningTime="2025-11-26 05:43:31.252175687 +0000 UTC m=+1069.435227283" Nov 26 05:43:32 crc kubenswrapper[4871]: I1126 05:43:32.529679 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="468d6a43-d467-4606-b7fd-a39e765a72e1" path="/var/lib/kubelet/pods/468d6a43-d467-4606-b7fd-a39e765a72e1/volumes" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.277167 4871 generic.go:334] "Generic (PLEG): container finished" podID="136e68fc-176f-4240-9876-53e81cc4caab" containerID="03c2a2301d01ead888510ff8d480fc972954ab66ec9f544fc5751c2dc780b749" exitCode=0 Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.277237 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-2mr78" event={"ID":"136e68fc-176f-4240-9876-53e81cc4caab","Type":"ContainerDied","Data":"03c2a2301d01ead888510ff8d480fc972954ab66ec9f544fc5751c2dc780b749"} Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.467755 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-9kf7b"] Nov 26 05:43:35 crc kubenswrapper[4871]: E1126 05:43:35.468316 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4666a76-36db-4a3e-a12d-0cfb82284f7a" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468348 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4666a76-36db-4a3e-a12d-0cfb82284f7a" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: E1126 05:43:35.468363 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09f49677-5f6f-4144-9e29-4db96e4fcb1e" containerName="mariadb-database-create" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468375 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="09f49677-5f6f-4144-9e29-4db96e4fcb1e" containerName="mariadb-database-create" Nov 26 05:43:35 crc kubenswrapper[4871]: E1126 05:43:35.468395 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81b8252a-3d5b-4d62-9d38-c5e696bbe613" containerName="mariadb-database-create" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468406 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="81b8252a-3d5b-4d62-9d38-c5e696bbe613" containerName="mariadb-database-create" Nov 26 05:43:35 crc kubenswrapper[4871]: E1126 05:43:35.468432 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07bd5933-72a5-4f99-b5f9-08d8cadc77e8" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468442 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="07bd5933-72a5-4f99-b5f9-08d8cadc77e8" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: E1126 05:43:35.468466 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="468d6a43-d467-4606-b7fd-a39e765a72e1" containerName="dnsmasq-dns" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468477 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="468d6a43-d467-4606-b7fd-a39e765a72e1" containerName="dnsmasq-dns" Nov 26 05:43:35 crc kubenswrapper[4871]: E1126 05:43:35.468492 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25db4f6b-d9ba-4147-8f75-8283a144bc17" containerName="mariadb-database-create" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468501 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="25db4f6b-d9ba-4147-8f75-8283a144bc17" containerName="mariadb-database-create" Nov 26 05:43:35 crc 
kubenswrapper[4871]: E1126 05:43:35.468552 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9ce6687-ccf1-40e7-b1f8-b42502d5a149" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468564 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9ce6687-ccf1-40e7-b1f8-b42502d5a149" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: E1126 05:43:35.468585 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="468d6a43-d467-4606-b7fd-a39e765a72e1" containerName="init" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468595 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="468d6a43-d467-4606-b7fd-a39e765a72e1" containerName="init" Nov 26 05:43:35 crc kubenswrapper[4871]: E1126 05:43:35.468655 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0601d4ca-135d-4d81-87cf-13e178ed9660" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468666 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="0601d4ca-135d-4d81-87cf-13e178ed9660" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: E1126 05:43:35.468684 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d791f62b-80ce-4ea9-acdc-bfb288614bac" containerName="mariadb-database-create" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468692 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="d791f62b-80ce-4ea9-acdc-bfb288614bac" containerName="mariadb-database-create" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468960 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="d791f62b-80ce-4ea9-acdc-bfb288614bac" containerName="mariadb-database-create" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468984 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="468d6a43-d467-4606-b7fd-a39e765a72e1" containerName="dnsmasq-dns" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.468999 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4666a76-36db-4a3e-a12d-0cfb82284f7a" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.469019 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="07bd5933-72a5-4f99-b5f9-08d8cadc77e8" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.469037 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="25db4f6b-d9ba-4147-8f75-8283a144bc17" containerName="mariadb-database-create" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.469050 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9ce6687-ccf1-40e7-b1f8-b42502d5a149" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.469065 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="0601d4ca-135d-4d81-87cf-13e178ed9660" containerName="mariadb-account-create-update" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.469080 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="81b8252a-3d5b-4d62-9d38-c5e696bbe613" containerName="mariadb-database-create" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.469109 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="09f49677-5f6f-4144-9e29-4db96e4fcb1e" containerName="mariadb-database-create" Nov 26 05:43:35 crc 
kubenswrapper[4871]: I1126 05:43:35.470030 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.476261 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.477495 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2br48" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.485738 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-9kf7b"] Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.540147 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-db-sync-config-data\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.540231 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t92vp\" (UniqueName: \"kubernetes.io/projected/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-kube-api-access-t92vp\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.540377 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-combined-ca-bundle\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.540418 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-config-data\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.641614 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-combined-ca-bundle\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.641670 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-config-data\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.641705 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-db-sync-config-data\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.641751 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t92vp\" (UniqueName: 
\"kubernetes.io/projected/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-kube-api-access-t92vp\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.657053 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-db-sync-config-data\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.660019 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-combined-ca-bundle\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.662240 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-config-data\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.664891 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t92vp\" (UniqueName: \"kubernetes.io/projected/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-kube-api-access-t92vp\") pod \"glance-db-sync-9kf7b\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:35 crc kubenswrapper[4871]: I1126 05:43:35.798307 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-9kf7b" Nov 26 05:43:36 crc kubenswrapper[4871]: I1126 05:43:36.585801 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-2mr78" Nov 26 05:43:36 crc kubenswrapper[4871]: I1126 05:43:36.760876 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqhq4\" (UniqueName: \"kubernetes.io/projected/136e68fc-176f-4240-9876-53e81cc4caab-kube-api-access-qqhq4\") pod \"136e68fc-176f-4240-9876-53e81cc4caab\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " Nov 26 05:43:36 crc kubenswrapper[4871]: I1126 05:43:36.761263 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-config-data\") pod \"136e68fc-176f-4240-9876-53e81cc4caab\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " Nov 26 05:43:36 crc kubenswrapper[4871]: I1126 05:43:36.761320 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-combined-ca-bundle\") pod \"136e68fc-176f-4240-9876-53e81cc4caab\" (UID: \"136e68fc-176f-4240-9876-53e81cc4caab\") " Nov 26 05:43:36 crc kubenswrapper[4871]: I1126 05:43:36.765349 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/136e68fc-176f-4240-9876-53e81cc4caab-kube-api-access-qqhq4" (OuterVolumeSpecName: "kube-api-access-qqhq4") pod "136e68fc-176f-4240-9876-53e81cc4caab" (UID: "136e68fc-176f-4240-9876-53e81cc4caab"). InnerVolumeSpecName "kube-api-access-qqhq4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:43:36 crc kubenswrapper[4871]: I1126 05:43:36.785975 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "136e68fc-176f-4240-9876-53e81cc4caab" (UID: "136e68fc-176f-4240-9876-53e81cc4caab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:43:36 crc kubenswrapper[4871]: I1126 05:43:36.804722 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-config-data" (OuterVolumeSpecName: "config-data") pod "136e68fc-176f-4240-9876-53e81cc4caab" (UID: "136e68fc-176f-4240-9876-53e81cc4caab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:43:36 crc kubenswrapper[4871]: I1126 05:43:36.863342 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqhq4\" (UniqueName: \"kubernetes.io/projected/136e68fc-176f-4240-9876-53e81cc4caab-kube-api-access-qqhq4\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:36 crc kubenswrapper[4871]: I1126 05:43:36.863385 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:36 crc kubenswrapper[4871]: I1126 05:43:36.863397 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/136e68fc-176f-4240-9876-53e81cc4caab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:36 crc kubenswrapper[4871]: I1126 05:43:36.901285 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-9kf7b"] Nov 26 05:43:36 crc kubenswrapper[4871]: W1126 05:43:36.903013 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4385b1e_f5ee_4a06_8e3e_5d06d7fad5a6.slice/crio-1ed612055e88b29dab36966117aad22fba1a95c3bb1162a284e2924b700069af WatchSource:0}: Error finding container 1ed612055e88b29dab36966117aad22fba1a95c3bb1162a284e2924b700069af: Status 404 returned error can't find the container with id 1ed612055e88b29dab36966117aad22fba1a95c3bb1162a284e2924b700069af Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.303322 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-2mr78" event={"ID":"136e68fc-176f-4240-9876-53e81cc4caab","Type":"ContainerDied","Data":"694724d091f1e88faf63eb1a004125a8c306daf1a1d16bd4d74b21a7d23a2b7e"} Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.303365 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="694724d091f1e88faf63eb1a004125a8c306daf1a1d16bd4d74b21a7d23a2b7e" Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.303383 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-2mr78" Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.304671 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-gggp7" event={"ID":"1e893829-69cb-4a4f-9b97-5b96332e5724","Type":"ContainerStarted","Data":"4cad5d60e79ce469f85069056ad86015e022a74a277ba0f87ec936c22221c73a"} Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.305594 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9kf7b" event={"ID":"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6","Type":"ContainerStarted","Data":"1ed612055e88b29dab36966117aad22fba1a95c3bb1162a284e2924b700069af"} Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.327115 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-db-sync-gggp7" podStartSLOduration=1.973066778 podStartE2EDuration="12.327095163s" podCreationTimestamp="2025-11-26 05:43:25 +0000 UTC" firstStartedPulling="2025-11-26 05:43:26.071644293 +0000 UTC m=+1064.254695879" lastFinishedPulling="2025-11-26 05:43:36.425672678 +0000 UTC m=+1074.608724264" observedRunningTime="2025-11-26 05:43:37.320061158 +0000 UTC m=+1075.503112744" watchObservedRunningTime="2025-11-26 05:43:37.327095163 +0000 UTC m=+1075.510146749" Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.580047 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58bbf48b7f-5wrm8"] Nov 26 05:43:37 crc kubenswrapper[4871]: E1126 05:43:37.580546 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="136e68fc-176f-4240-9876-53e81cc4caab" containerName="keystone-db-sync" Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.580565 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="136e68fc-176f-4240-9876-53e81cc4caab" containerName="keystone-db-sync" Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.580773 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="136e68fc-176f-4240-9876-53e81cc4caab" containerName="keystone-db-sync" Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.582081 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8" Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.588385 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58bbf48b7f-5wrm8"] Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.667640 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-gzt89"] Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.669293 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.676404 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gzt89"]
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.677741 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.677994 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.678173 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-w7bjw"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.679683 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683011 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-credential-keys\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683061 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-svc\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683088 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-sb\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683134 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-swift-storage-0\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683159 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-fernet-keys\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683178 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whqt6\" (UniqueName: \"kubernetes.io/projected/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-kube-api-access-whqt6\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683225 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-config-data\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683243 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-nb\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683262 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-config\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683268 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683302 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-combined-ca-bundle\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683334 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk449\" (UniqueName: \"kubernetes.io/projected/d4fc7db5-083a-4a96-ac48-081df4923f57-kube-api-access-wk449\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.683352 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-scripts\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.772943 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-kvkr5"]
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.774007 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.779910 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.780058 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-xhmrf"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.780492 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784481 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-config-data\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784513 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-nb\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784548 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-config\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784579 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-combined-ca-bundle\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784620 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk449\" (UniqueName: \"kubernetes.io/projected/d4fc7db5-083a-4a96-ac48-081df4923f57-kube-api-access-wk449\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784636 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-scripts\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784674 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-credential-keys\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784701 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-svc\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784723 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-config\") pod \"neutron-db-sync-kvkr5\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") " pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784747 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-sb\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784769 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-swift-storage-0\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784785 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4flf\" (UniqueName: \"kubernetes.io/projected/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-kube-api-access-f4flf\") pod \"neutron-db-sync-kvkr5\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") " pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784807 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-combined-ca-bundle\") pod \"neutron-db-sync-kvkr5\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") " pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784823 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-fernet-keys\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.784847 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whqt6\" (UniqueName: \"kubernetes.io/projected/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-kube-api-access-whqt6\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.786289 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-swift-storage-0\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.786910 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-sb\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.787133 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-svc\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.789355 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-nb\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.791110 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-config\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.793021 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-fernet-keys\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.793561 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-config-data\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.794320 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-combined-ca-bundle\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.796404 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-kvkr5"]
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.802597 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-scripts\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.808717 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-67f98f67b9-25fgx"]
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.812139 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-credential-keys\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.815825 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.827607 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whqt6\" (UniqueName: \"kubernetes.io/projected/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-kube-api-access-whqt6\") pod \"dnsmasq-dns-58bbf48b7f-5wrm8\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.832951 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.833049 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.833612 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-dst6m"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.833726 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.839082 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk449\" (UniqueName: \"kubernetes.io/projected/d4fc7db5-083a-4a96-ac48-081df4923f57-kube-api-access-wk449\") pod \"keystone-bootstrap-gzt89\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.844985 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-67f98f67b9-25fgx"]
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.887437 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4flf\" (UniqueName: \"kubernetes.io/projected/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-kube-api-access-f4flf\") pod \"neutron-db-sync-kvkr5\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") " pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.887674 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-combined-ca-bundle\") pod \"neutron-db-sync-kvkr5\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") " pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.888222 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-config\") pod \"neutron-db-sync-kvkr5\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") " pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.900456 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-config\") pod \"neutron-db-sync-kvkr5\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") " pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.915998 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-combined-ca-bundle\") pod \"neutron-db-sync-kvkr5\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") " pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.924250 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.942953 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4flf\" (UniqueName: \"kubernetes.io/projected/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-kube-api-access-f4flf\") pod \"neutron-db-sync-kvkr5\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") " pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.955750 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.958063 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.973122 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.973356 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.990984 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-config-data\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.991015 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjzj7\" (UniqueName: \"kubernetes.io/projected/9b724414-8682-4e73-8b2d-305fce381613-kube-api-access-sjzj7\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.991051 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3a392cdb-377e-4047-a1f4-f190429fe076-horizon-secret-key\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.991081 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.991103 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-run-httpd\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.991151 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.991165 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-log-httpd\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.991195 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-config-data\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.991216 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggnph\" (UniqueName: \"kubernetes.io/projected/3a392cdb-377e-4047-a1f4-f190429fe076-kube-api-access-ggnph\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.991230 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-scripts\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.991247 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a392cdb-377e-4047-a1f4-f190429fe076-logs\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:37 crc kubenswrapper[4871]: I1126 05:43:37.991262 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-scripts\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:37.993284 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gzt89"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.003209 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-7pzbd"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.004344 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.013179 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.019800 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.020212 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.020404 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8qdzr"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.045141 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-7pzbd"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.076601 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7f4dbc4c7c-tkmkl"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.078069 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f4dbc4c7c-tkmkl"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.078159 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092655 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3a392cdb-377e-4047-a1f4-f190429fe076-horizon-secret-key\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092702 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092728 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-run-httpd\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092779 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092794 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-log-httpd\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092837 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-config-data\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092862 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggnph\" (UniqueName: \"kubernetes.io/projected/3a392cdb-377e-4047-a1f4-f190429fe076-kube-api-access-ggnph\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092878 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-scripts\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092895 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a392cdb-377e-4047-a1f4-f190429fe076-logs\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092909 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-scripts\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092938 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-config-data\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.092954 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjzj7\" (UniqueName: \"kubernetes.io/projected/9b724414-8682-4e73-8b2d-305fce381613-kube-api-access-sjzj7\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.104358 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-run-httpd\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.104747 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.105565 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-scripts\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.106166 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a392cdb-377e-4047-a1f4-f190429fe076-logs\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.106400 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-config-data\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.107504 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-log-httpd\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.111944 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3a392cdb-377e-4047-a1f4-f190429fe076-horizon-secret-key\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.112821 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-config-data\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.113047 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-scripts\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.114124 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.114632 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.125925 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-lwfp5"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.127095 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-lwfp5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.130651 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-lwfp5"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.148104 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.148287 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-tgrk2"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.153240 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggnph\" (UniqueName: \"kubernetes.io/projected/3a392cdb-377e-4047-a1f4-f190429fe076-kube-api-access-ggnph\") pod \"horizon-67f98f67b9-25fgx\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.187576 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjzj7\" (UniqueName: \"kubernetes.io/projected/9b724414-8682-4e73-8b2d-305fce381613-kube-api-access-sjzj7\") pod \"ceilometer-0\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.187639 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58bbf48b7f-5wrm8"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.193855 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cn8cb\" (UniqueName: \"kubernetes.io/projected/3f3f046f-4c4d-4c85-9d61-043c3006ea05-kube-api-access-cn8cb\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.193891 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-db-sync-config-data\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.193927 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-config-data\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.193944 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-scripts\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.193968 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f3f046f-4c4d-4c85-9d61-043c3006ea05-logs\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.193985 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-scripts\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.194024 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-combined-ca-bundle\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.194061 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9dnr\" (UniqueName: \"kubernetes.io/projected/c8bdb9c7-91c3-40dc-920e-6e333b18f331-kube-api-access-w9dnr\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.194080 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3f3f046f-4c4d-4c85-9d61-043c3006ea05-horizon-secret-key\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.194107 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c8bdb9c7-91c3-40dc-920e-6e333b18f331-etc-machine-id\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.194148 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-config-data\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.215824 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-6z5bf"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.217506 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.222295 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-j5phm"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.233948 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.234197 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.235029 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-6z5bf"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.252065 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-578598f949-zhj9v"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.253410 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.275129 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-578598f949-zhj9v"]
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.305508 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5tsq\" (UniqueName: \"kubernetes.io/projected/2788cfc5-005e-4f99-83ac-9011cbe838cf-kube-api-access-v5tsq\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.305585 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-config-data\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.305608 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-scripts\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.305630 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f3f046f-4c4d-4c85-9d61-043c3006ea05-logs\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.305649 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-config\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.306088 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f3f046f-4c4d-4c85-9d61-043c3006ea05-logs\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.307430 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67f98f67b9-25fgx"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.310491 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-config-data\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.305770 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-scripts\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.310786 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-nb\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.310849 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-combined-ca-bundle\") pod \"barbican-db-sync-lwfp5\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " pod="openstack/barbican-db-sync-lwfp5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.310876 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-swift-storage-0\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.310903 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztplr\" (UniqueName: \"kubernetes.io/projected/e72bbf93-367f-4207-b846-b9cf819b9b4c-kube-api-access-ztplr\") pod \"barbican-db-sync-lwfp5\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " pod="openstack/barbican-db-sync-lwfp5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.310956 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-combined-ca-bundle\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311010 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-scripts\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311052 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-db-sync-config-data\") pod \"barbican-db-sync-lwfp5\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " pod="openstack/barbican-db-sync-lwfp5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311079 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9dnr\" (UniqueName: \"kubernetes.io/projected/c8bdb9c7-91c3-40dc-920e-6e333b18f331-kube-api-access-w9dnr\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311108 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3f3f046f-4c4d-4c85-9d61-043c3006ea05-horizon-secret-key\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311167 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c8bdb9c7-91c3-40dc-920e-6e333b18f331-etc-machine-id\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311202 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-sb\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311238 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm6dx\" (UniqueName: \"kubernetes.io/projected/450179f7-baf0-481d-ad0e-4d3534ee28f4-kube-api-access-hm6dx\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311335 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-config-data\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311363 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-config-data\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311383 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/450179f7-baf0-481d-ad0e-4d3534ee28f4-logs\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311410 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cn8cb\" (UniqueName: \"kubernetes.io/projected/3f3f046f-4c4d-4c85-9d61-043c3006ea05-kube-api-access-cn8cb\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311429 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-svc\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311456 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-combined-ca-bundle\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.311479 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-db-sync-config-data\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.315302 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-scripts\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.317585 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c8bdb9c7-91c3-40dc-920e-6e333b18f331-etc-machine-id\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.321383 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-db-sync-config-data\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.326722 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-combined-ca-bundle\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.327023 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3f3f046f-4c4d-4c85-9d61-043c3006ea05-horizon-secret-key\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.327207 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-config-data\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.327626 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-scripts\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.334376 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.353007 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9dnr\" (UniqueName: \"kubernetes.io/projected/c8bdb9c7-91c3-40dc-920e-6e333b18f331-kube-api-access-w9dnr\") pod \"cinder-db-sync-7pzbd\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " pod="openstack/cinder-db-sync-7pzbd"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.394128 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cn8cb\" (UniqueName: \"kubernetes.io/projected/3f3f046f-4c4d-4c85-9d61-043c3006ea05-kube-api-access-cn8cb\") pod \"horizon-7f4dbc4c7c-tkmkl\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") " pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422052 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-swift-storage-0\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422088 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztplr\" (UniqueName: \"kubernetes.io/projected/e72bbf93-367f-4207-b846-b9cf819b9b4c-kube-api-access-ztplr\") pod \"barbican-db-sync-lwfp5\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " pod="openstack/barbican-db-sync-lwfp5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422125 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-scripts\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422149 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-db-sync-config-data\") pod \"barbican-db-sync-lwfp5\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " pod="openstack/barbican-db-sync-lwfp5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422188 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-sb\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422208 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hm6dx\" (UniqueName: \"kubernetes.io/projected/450179f7-baf0-481d-ad0e-4d3534ee28f4-kube-api-access-hm6dx\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422237 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-config-data\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422255 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/450179f7-baf0-481d-ad0e-4d3534ee28f4-logs\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422280 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-svc\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422300 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-combined-ca-bundle\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422329 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5tsq\" (UniqueName: \"kubernetes.io/projected/2788cfc5-005e-4f99-83ac-9011cbe838cf-kube-api-access-v5tsq\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422360 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-config\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422375 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-nb\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.422396 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-combined-ca-bundle\") pod \"barbican-db-sync-lwfp5\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " pod="openstack/barbican-db-sync-lwfp5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.423793 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-swift-storage-0\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.430202 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-combined-ca-bundle\") pod \"barbican-db-sync-lwfp5\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " pod="openstack/barbican-db-sync-lwfp5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.433322 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-config-data\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.433895 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-scripts\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.434124 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-config\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.434518 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-nb\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.434731 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/450179f7-baf0-481d-ad0e-4d3534ee28f4-logs\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.435346 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-sb\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.435362 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-svc\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.445230 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-db-sync-config-data\") pod \"barbican-db-sync-lwfp5\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " pod="openstack/barbican-db-sync-lwfp5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.447831 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztplr\" (UniqueName: \"kubernetes.io/projected/e72bbf93-367f-4207-b846-b9cf819b9b4c-kube-api-access-ztplr\") pod \"barbican-db-sync-lwfp5\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " pod="openstack/barbican-db-sync-lwfp5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.453429 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-combined-ca-bundle\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.458988 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5tsq\" (UniqueName: \"kubernetes.io/projected/2788cfc5-005e-4f99-83ac-9011cbe838cf-kube-api-access-v5tsq\") pod \"dnsmasq-dns-578598f949-zhj9v\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.469261 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm6dx\" (UniqueName: \"kubernetes.io/projected/450179f7-baf0-481d-ad0e-4d3534ee28f4-kube-api-access-hm6dx\") pod \"placement-db-sync-6z5bf\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.516695 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.563252 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-lwfp5"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.587836 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-6z5bf"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.601182 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-578598f949-zhj9v"
Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.650570 4871 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/cinder-db-sync-7pzbd" Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.720334 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58bbf48b7f-5wrm8"] Nov 26 05:43:38 crc kubenswrapper[4871]: I1126 05:43:38.961787 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-kvkr5"] Nov 26 05:43:38 crc kubenswrapper[4871]: W1126 05:43:38.987490 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod977ad0ca_daf1_4b9d_b75a_c697ff3239c2.slice/crio-105fb12375adcce0f56a236d7c131a522c5f953649273a5892af901e95a09e2d WatchSource:0}: Error finding container 105fb12375adcce0f56a236d7c131a522c5f953649273a5892af901e95a09e2d: Status 404 returned error can't find the container with id 105fb12375adcce0f56a236d7c131a522c5f953649273a5892af901e95a09e2d Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.090336 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.108600 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gzt89"] Nov 26 05:43:39 crc kubenswrapper[4871]: W1126 05:43:39.136066 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b724414_8682_4e73_8b2d_305fce381613.slice/crio-eb4b57b6a55dc1bcf36abda5f3da18db0b640113bc05ecf387d379d5733a9fef WatchSource:0}: Error finding container eb4b57b6a55dc1bcf36abda5f3da18db0b640113bc05ecf387d379d5733a9fef: Status 404 returned error can't find the container with id eb4b57b6a55dc1bcf36abda5f3da18db0b640113bc05ecf387d379d5733a9fef Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.136514 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-67f98f67b9-25fgx"] Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.242875 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-6z5bf"] Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.266423 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f4dbc4c7c-tkmkl"] Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.280263 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-lwfp5"] Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.441765 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-lwfp5" event={"ID":"e72bbf93-367f-4207-b846-b9cf819b9b4c","Type":"ContainerStarted","Data":"9e9cb5ad259ed221b84957b20ad52a59114f07f826ec1ceaacc4c74790489e87"} Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.474493 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-7pzbd"] Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.486241 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-578598f949-zhj9v"] Nov 26 05:43:39 crc kubenswrapper[4871]: W1126 05:43:39.488732 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2788cfc5_005e_4f99_83ac_9011cbe838cf.slice/crio-0871c35cba3d17d4152761d76f60d886f59d320a796ac5c03a1935d893d185b9 WatchSource:0}: Error finding container 0871c35cba3d17d4152761d76f60d886f59d320a796ac5c03a1935d893d185b9: Status 404 returned error can't find the container with id 
0871c35cba3d17d4152761d76f60d886f59d320a796ac5c03a1935d893d185b9 Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.491491 4871 generic.go:334] "Generic (PLEG): container finished" podID="048c6d5e-d95d-406b-a2ee-3d0b0de4c336" containerID="49dc64fdd5361925c43861279211b4472ed08182084aec02a6ad8d141b1bbebc" exitCode=0 Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.491600 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8" event={"ID":"048c6d5e-d95d-406b-a2ee-3d0b0de4c336","Type":"ContainerDied","Data":"49dc64fdd5361925c43861279211b4472ed08182084aec02a6ad8d141b1bbebc"} Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.491629 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8" event={"ID":"048c6d5e-d95d-406b-a2ee-3d0b0de4c336","Type":"ContainerStarted","Data":"00beab9451c8bd9ed30452fb3c6926ca3e149f1dfaadede57c50a6cb637f52ce"} Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.497127 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f98f67b9-25fgx" event={"ID":"3a392cdb-377e-4047-a1f4-f190429fe076","Type":"ContainerStarted","Data":"c028e4e4ac4123436009cc22f9edb7f403a4f9a7d4454c859a70e38a86ed71de"} Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.499535 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f4dbc4c7c-tkmkl" event={"ID":"3f3f046f-4c4d-4c85-9d61-043c3006ea05","Type":"ContainerStarted","Data":"94511934e403a878b30b92d89cd60c557bb560f9ecb88f1050f70408638746b0"} Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.514770 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-kvkr5" event={"ID":"977ad0ca-daf1-4b9d-b75a-c697ff3239c2","Type":"ContainerStarted","Data":"0d7d4bcebf3e501276514ff5ba7838d4951fc3f90c68c7d6b24ffd745bdf19a6"} Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.514812 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-kvkr5" event={"ID":"977ad0ca-daf1-4b9d-b75a-c697ff3239c2","Type":"ContainerStarted","Data":"105fb12375adcce0f56a236d7c131a522c5f953649273a5892af901e95a09e2d"} Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.533382 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b724414-8682-4e73-8b2d-305fce381613","Type":"ContainerStarted","Data":"eb4b57b6a55dc1bcf36abda5f3da18db0b640113bc05ecf387d379d5733a9fef"} Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.536029 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-6z5bf" event={"ID":"450179f7-baf0-481d-ad0e-4d3534ee28f4","Type":"ContainerStarted","Data":"25421678217b9c7800f001ab6e440214418c18f7f034ccdd1269ba19c8c287e0"} Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.540199 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gzt89" event={"ID":"d4fc7db5-083a-4a96-ac48-081df4923f57","Type":"ContainerStarted","Data":"80f653f1f49ea6a8bc0a49af029f7235a6e20f2c0333fc2f21be1d9d654ec329"} Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.540224 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gzt89" event={"ID":"d4fc7db5-083a-4a96-ac48-081df4923f57","Type":"ContainerStarted","Data":"0155f0a0c67e8299ae5b41f611dd0f641db02eb05d0ff103f720572a353a2cf2"} Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.540860 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/neutron-db-sync-kvkr5" podStartSLOduration=2.540843206 podStartE2EDuration="2.540843206s" podCreationTimestamp="2025-11-26 05:43:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:43:39.534780916 +0000 UTC m=+1077.717832502" watchObservedRunningTime="2025-11-26 05:43:39.540843206 +0000 UTC m=+1077.723894792" Nov 26 05:43:39 crc kubenswrapper[4871]: I1126 05:43:39.572483 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-gzt89" podStartSLOduration=2.57246588 podStartE2EDuration="2.57246588s" podCreationTimestamp="2025-11-26 05:43:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:43:39.55793851 +0000 UTC m=+1077.740990096" watchObservedRunningTime="2025-11-26 05:43:39.57246588 +0000 UTC m=+1077.755517466" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:39.987249 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.070257 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-67f98f67b9-25fgx"] Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.100824 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.113687 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-65fb6dbd77-xt6d5"] Nov 26 05:43:40 crc kubenswrapper[4871]: E1126 05:43:40.114156 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="048c6d5e-d95d-406b-a2ee-3d0b0de4c336" containerName="init" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.114176 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="048c6d5e-d95d-406b-a2ee-3d0b0de4c336" containerName="init" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.114357 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="048c6d5e-d95d-406b-a2ee-3d0b0de4c336" containerName="init" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.115327 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.131703 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-65fb6dbd77-xt6d5"] Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.190453 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-nb\") pod \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.192927 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-svc\") pod \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.193037 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-config\") pod \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.193158 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-swift-storage-0\") pod \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.193248 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-sb\") pod \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.193359 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whqt6\" (UniqueName: \"kubernetes.io/projected/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-kube-api-access-whqt6\") pod \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\" (UID: \"048c6d5e-d95d-406b-a2ee-3d0b0de4c336\") " Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.193871 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6l9g\" (UniqueName: \"kubernetes.io/projected/dbb025f2-31cc-41c2-886d-4cd68ed17d44-kube-api-access-z6l9g\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.193980 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dbb025f2-31cc-41c2-886d-4cd68ed17d44-horizon-secret-key\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.194103 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-config-data\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " 
pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.194218 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-scripts\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.194319 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbb025f2-31cc-41c2-886d-4cd68ed17d44-logs\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.214146 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-kube-api-access-whqt6" (OuterVolumeSpecName: "kube-api-access-whqt6") pod "048c6d5e-d95d-406b-a2ee-3d0b0de4c336" (UID: "048c6d5e-d95d-406b-a2ee-3d0b0de4c336"). InnerVolumeSpecName "kube-api-access-whqt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.228710 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "048c6d5e-d95d-406b-a2ee-3d0b0de4c336" (UID: "048c6d5e-d95d-406b-a2ee-3d0b0de4c336"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.244615 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "048c6d5e-d95d-406b-a2ee-3d0b0de4c336" (UID: "048c6d5e-d95d-406b-a2ee-3d0b0de4c336"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.246093 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "048c6d5e-d95d-406b-a2ee-3d0b0de4c336" (UID: "048c6d5e-d95d-406b-a2ee-3d0b0de4c336"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.247116 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "048c6d5e-d95d-406b-a2ee-3d0b0de4c336" (UID: "048c6d5e-d95d-406b-a2ee-3d0b0de4c336"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.251681 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-config" (OuterVolumeSpecName: "config") pod "048c6d5e-d95d-406b-a2ee-3d0b0de4c336" (UID: "048c6d5e-d95d-406b-a2ee-3d0b0de4c336"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.297118 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6l9g\" (UniqueName: \"kubernetes.io/projected/dbb025f2-31cc-41c2-886d-4cd68ed17d44-kube-api-access-z6l9g\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.297177 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dbb025f2-31cc-41c2-886d-4cd68ed17d44-horizon-secret-key\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.297204 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-config-data\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.297261 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-scripts\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.297387 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbb025f2-31cc-41c2-886d-4cd68ed17d44-logs\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.297774 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.297885 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.297924 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.297938 4871 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.297956 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.297969 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whqt6\" (UniqueName: \"kubernetes.io/projected/048c6d5e-d95d-406b-a2ee-3d0b0de4c336-kube-api-access-whqt6\") on node \"crc\" DevicePath \"\"" Nov 26 
05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.298459 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbb025f2-31cc-41c2-886d-4cd68ed17d44-logs\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.299070 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-config-data\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.299351 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-scripts\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.300915 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dbb025f2-31cc-41c2-886d-4cd68ed17d44-horizon-secret-key\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.312227 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6l9g\" (UniqueName: \"kubernetes.io/projected/dbb025f2-31cc-41c2-886d-4cd68ed17d44-kube-api-access-z6l9g\") pod \"horizon-65fb6dbd77-xt6d5\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") " pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.447621 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.550186 4871 generic.go:334] "Generic (PLEG): container finished" podID="2788cfc5-005e-4f99-83ac-9011cbe838cf" containerID="67005b550bd76231f9967f6ba0536c9d13c10df2f1ef507fd3f567579baee20e" exitCode=0 Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.550257 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578598f949-zhj9v" event={"ID":"2788cfc5-005e-4f99-83ac-9011cbe838cf","Type":"ContainerDied","Data":"67005b550bd76231f9967f6ba0536c9d13c10df2f1ef507fd3f567579baee20e"} Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.550324 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578598f949-zhj9v" event={"ID":"2788cfc5-005e-4f99-83ac-9011cbe838cf","Type":"ContainerStarted","Data":"0871c35cba3d17d4152761d76f60d886f59d320a796ac5c03a1935d893d185b9"} Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.553628 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8" event={"ID":"048c6d5e-d95d-406b-a2ee-3d0b0de4c336","Type":"ContainerDied","Data":"00beab9451c8bd9ed30452fb3c6926ca3e149f1dfaadede57c50a6cb637f52ce"} Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.553688 4871 scope.go:117] "RemoveContainer" containerID="49dc64fdd5361925c43861279211b4472ed08182084aec02a6ad8d141b1bbebc" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.553742 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58bbf48b7f-5wrm8" Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.565582 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-7pzbd" event={"ID":"c8bdb9c7-91c3-40dc-920e-6e333b18f331","Type":"ContainerStarted","Data":"ca3addd3f65ad75944482261e2ad3a0edc47c590bbea5f15af8074c94166ef2d"} Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.628390 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58bbf48b7f-5wrm8"] Nov 26 05:43:40 crc kubenswrapper[4871]: I1126 05:43:40.646403 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58bbf48b7f-5wrm8"] Nov 26 05:43:41 crc kubenswrapper[4871]: I1126 05:43:41.053817 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-65fb6dbd77-xt6d5"] Nov 26 05:43:41 crc kubenswrapper[4871]: W1126 05:43:41.065247 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddbb025f2_31cc_41c2_886d_4cd68ed17d44.slice/crio-1ff6ae06d4caebbdfcb99e13793c5660c7896a91d9d6daa7fa54fe792680fda4 WatchSource:0}: Error finding container 1ff6ae06d4caebbdfcb99e13793c5660c7896a91d9d6daa7fa54fe792680fda4: Status 404 returned error can't find the container with id 1ff6ae06d4caebbdfcb99e13793c5660c7896a91d9d6daa7fa54fe792680fda4 Nov 26 05:43:41 crc kubenswrapper[4871]: I1126 05:43:41.599864 4871 generic.go:334] "Generic (PLEG): container finished" podID="1e893829-69cb-4a4f-9b97-5b96332e5724" containerID="4cad5d60e79ce469f85069056ad86015e022a74a277ba0f87ec936c22221c73a" exitCode=0 Nov 26 05:43:41 crc kubenswrapper[4871]: I1126 05:43:41.599948 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-gggp7" event={"ID":"1e893829-69cb-4a4f-9b97-5b96332e5724","Type":"ContainerDied","Data":"4cad5d60e79ce469f85069056ad86015e022a74a277ba0f87ec936c22221c73a"} Nov 26 05:43:41 crc kubenswrapper[4871]: I1126 05:43:41.602366 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-65fb6dbd77-xt6d5" event={"ID":"dbb025f2-31cc-41c2-886d-4cd68ed17d44","Type":"ContainerStarted","Data":"1ff6ae06d4caebbdfcb99e13793c5660c7896a91d9d6daa7fa54fe792680fda4"} Nov 26 05:43:41 crc kubenswrapper[4871]: I1126 05:43:41.604862 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578598f949-zhj9v" event={"ID":"2788cfc5-005e-4f99-83ac-9011cbe838cf","Type":"ContainerStarted","Data":"fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec"} Nov 26 05:43:41 crc kubenswrapper[4871]: I1126 05:43:41.605023 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-578598f949-zhj9v" Nov 26 05:43:41 crc kubenswrapper[4871]: I1126 05:43:41.639641 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-578598f949-zhj9v" podStartSLOduration=3.63961916 podStartE2EDuration="3.63961916s" podCreationTimestamp="2025-11-26 05:43:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:43:41.628212477 +0000 UTC m=+1079.811264063" watchObservedRunningTime="2025-11-26 05:43:41.63961916 +0000 UTC m=+1079.822670746" Nov 26 05:43:42 crc kubenswrapper[4871]: I1126 05:43:42.520929 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="048c6d5e-d95d-406b-a2ee-3d0b0de4c336" 
path="/var/lib/kubelet/pods/048c6d5e-d95d-406b-a2ee-3d0b0de4c336/volumes" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.029683 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-gggp7" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.166612 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-combined-ca-bundle\") pod \"1e893829-69cb-4a4f-9b97-5b96332e5724\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.166723 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-config-data\") pod \"1e893829-69cb-4a4f-9b97-5b96332e5724\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.166806 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkhhz\" (UniqueName: \"kubernetes.io/projected/1e893829-69cb-4a4f-9b97-5b96332e5724-kube-api-access-vkhhz\") pod \"1e893829-69cb-4a4f-9b97-5b96332e5724\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.166847 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-db-sync-config-data\") pod \"1e893829-69cb-4a4f-9b97-5b96332e5724\" (UID: \"1e893829-69cb-4a4f-9b97-5b96332e5724\") " Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.172978 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e893829-69cb-4a4f-9b97-5b96332e5724-kube-api-access-vkhhz" (OuterVolumeSpecName: "kube-api-access-vkhhz") pod "1e893829-69cb-4a4f-9b97-5b96332e5724" (UID: "1e893829-69cb-4a4f-9b97-5b96332e5724"). InnerVolumeSpecName "kube-api-access-vkhhz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.174703 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "1e893829-69cb-4a4f-9b97-5b96332e5724" (UID: "1e893829-69cb-4a4f-9b97-5b96332e5724"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.214276 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e893829-69cb-4a4f-9b97-5b96332e5724" (UID: "1e893829-69cb-4a4f-9b97-5b96332e5724"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.238367 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-config-data" (OuterVolumeSpecName: "config-data") pod "1e893829-69cb-4a4f-9b97-5b96332e5724" (UID: "1e893829-69cb-4a4f-9b97-5b96332e5724"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.269170 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.269441 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkhhz\" (UniqueName: \"kubernetes.io/projected/1e893829-69cb-4a4f-9b97-5b96332e5724-kube-api-access-vkhhz\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.269453 4871 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.269462 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e893829-69cb-4a4f-9b97-5b96332e5724-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.659942 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-gggp7" event={"ID":"1e893829-69cb-4a4f-9b97-5b96332e5724","Type":"ContainerDied","Data":"f9498ec4a15a27cec60ac4b2814eb1d57fccde1e3aad2ea1eb68555982f813de"} Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.659983 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9498ec4a15a27cec60ac4b2814eb1d57fccde1e3aad2ea1eb68555982f813de" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.660043 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-gggp7" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.834568 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 26 05:43:43 crc kubenswrapper[4871]: E1126 05:43:43.834962 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e893829-69cb-4a4f-9b97-5b96332e5724" containerName="watcher-db-sync" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.834979 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e893829-69cb-4a4f-9b97-5b96332e5724" containerName="watcher-db-sync" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.835159 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e893829-69cb-4a4f-9b97-5b96332e5724" containerName="watcher-db-sync" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.835791 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.837631 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-mph7b" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.840562 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.848487 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.946543 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.947738 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.949623 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.980781 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.980828 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/906807e1-f724-4ab4-9ccc-95656188890e-logs\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.980848 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.981032 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gz64\" (UniqueName: \"kubernetes.io/projected/906807e1-f724-4ab4-9ccc-95656188890e-kube-api-access-4gz64\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.981681 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-config-data\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.984110 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.985683 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.989281 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 26 05:43:43 crc kubenswrapper[4871]: I1126 05:43:43.996406 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.009749 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.084283 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gz64\" (UniqueName: \"kubernetes.io/projected/906807e1-f724-4ab4-9ccc-95656188890e-kube-api-access-4gz64\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.084326 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmqp5\" (UniqueName: \"kubernetes.io/projected/e939bb2f-dadb-4353-8845-f31c42b87a75-kube-api-access-vmqp5\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.084366 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.084395 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e939bb2f-dadb-4353-8845-f31c42b87a75-logs\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.084610 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e939bb2f-dadb-4353-8845-f31c42b87a75-config-data\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.084697 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g765\" (UniqueName: \"kubernetes.io/projected/711348b2-05b6-4d20-8eea-c2e19c4dc949-kube-api-access-4g765\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.084797 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-config-data\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.084855 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: 
\"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.084878 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/711348b2-05b6-4d20-8eea-c2e19c4dc949-logs\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.084952 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e939bb2f-dadb-4353-8845-f31c42b87a75-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.085038 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.085064 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/906807e1-f724-4ab4-9ccc-95656188890e-logs\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.085094 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.085125 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-config-data\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.085704 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/906807e1-f724-4ab4-9ccc-95656188890e-logs\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.090123 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.091108 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-config-data\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.097296 4871 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.100196 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gz64\" (UniqueName: \"kubernetes.io/projected/906807e1-f724-4ab4-9ccc-95656188890e-kube-api-access-4gz64\") pod \"watcher-decision-engine-0\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.151990 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.187059 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.187353 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e939bb2f-dadb-4353-8845-f31c42b87a75-logs\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.187375 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e939bb2f-dadb-4353-8845-f31c42b87a75-config-data\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.187402 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g765\" (UniqueName: \"kubernetes.io/projected/711348b2-05b6-4d20-8eea-c2e19c4dc949-kube-api-access-4g765\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.187443 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.187459 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/711348b2-05b6-4d20-8eea-c2e19c4dc949-logs\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.187489 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e939bb2f-dadb-4353-8845-f31c42b87a75-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.187548 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-config-data\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.187600 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmqp5\" (UniqueName: \"kubernetes.io/projected/e939bb2f-dadb-4353-8845-f31c42b87a75-kube-api-access-vmqp5\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.189768 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/711348b2-05b6-4d20-8eea-c2e19c4dc949-logs\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.190171 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e939bb2f-dadb-4353-8845-f31c42b87a75-logs\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.192763 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.193374 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.195498 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e939bb2f-dadb-4353-8845-f31c42b87a75-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.195942 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e939bb2f-dadb-4353-8845-f31c42b87a75-config-data\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.198246 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-config-data\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.208055 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmqp5\" (UniqueName: \"kubernetes.io/projected/e939bb2f-dadb-4353-8845-f31c42b87a75-kube-api-access-vmqp5\") pod \"watcher-applier-0\" (UID: \"e939bb2f-dadb-4353-8845-f31c42b87a75\") " pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.209064 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g765\" (UniqueName: 
\"kubernetes.io/projected/711348b2-05b6-4d20-8eea-c2e19c4dc949-kube-api-access-4g765\") pod \"watcher-api-0\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.283793 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.310655 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.688609 4871 generic.go:334] "Generic (PLEG): container finished" podID="d4fc7db5-083a-4a96-ac48-081df4923f57" containerID="80f653f1f49ea6a8bc0a49af029f7235a6e20f2c0333fc2f21be1d9d654ec329" exitCode=0 Nov 26 05:43:44 crc kubenswrapper[4871]: I1126 05:43:44.688646 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gzt89" event={"ID":"d4fc7db5-083a-4a96-ac48-081df4923f57","Type":"ContainerDied","Data":"80f653f1f49ea6a8bc0a49af029f7235a6e20f2c0333fc2f21be1d9d654ec329"} Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.625060 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f4dbc4c7c-tkmkl"] Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.645560 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-8665945b44-wbcwv"] Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.646967 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.649739 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.657590 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8665945b44-wbcwv"] Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.713419 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-65fb6dbd77-xt6d5"] Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.740183 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-secret-key\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.740248 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-combined-ca-bundle\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.740274 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-config-data\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.740352 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdjvn\" (UniqueName: 
\"kubernetes.io/projected/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-kube-api-access-fdjvn\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.740419 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-scripts\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.740438 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-logs\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.740484 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-tls-certs\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.741289 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7cbf6bc784-rm6hn"] Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.743300 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.764918 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7cbf6bc784-rm6hn"] Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.841938 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpklh\" (UniqueName: \"kubernetes.io/projected/4a2ec979-4e84-42ce-9299-8b9f5d88f001-kube-api-access-rpklh\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.841999 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4a2ec979-4e84-42ce-9299-8b9f5d88f001-horizon-secret-key\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.842025 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdjvn\" (UniqueName: \"kubernetes.io/projected/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-kube-api-access-fdjvn\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.842254 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a2ec979-4e84-42ce-9299-8b9f5d88f001-combined-ca-bundle\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 
05:43:46.842389 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-scripts\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.842438 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-logs\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.842917 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-logs\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.842980 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a2ec979-4e84-42ce-9299-8b9f5d88f001-logs\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.843087 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-tls-certs\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.843130 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4a2ec979-4e84-42ce-9299-8b9f5d88f001-scripts\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.843181 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4a2ec979-4e84-42ce-9299-8b9f5d88f001-config-data\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.843398 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-scripts\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.843506 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-secret-key\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.843650 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-combined-ca-bundle\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.843673 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-config-data\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.843716 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a2ec979-4e84-42ce-9299-8b9f5d88f001-horizon-tls-certs\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.845557 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-config-data\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.848583 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-tls-certs\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.848855 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-secret-key\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.849351 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-combined-ca-bundle\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.859787 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdjvn\" (UniqueName: \"kubernetes.io/projected/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-kube-api-access-fdjvn\") pod \"horizon-8665945b44-wbcwv\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") " pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.945981 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a2ec979-4e84-42ce-9299-8b9f5d88f001-horizon-tls-certs\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.946061 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpklh\" (UniqueName: \"kubernetes.io/projected/4a2ec979-4e84-42ce-9299-8b9f5d88f001-kube-api-access-rpklh\") pod 
\"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.946087 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4a2ec979-4e84-42ce-9299-8b9f5d88f001-horizon-secret-key\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.946130 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a2ec979-4e84-42ce-9299-8b9f5d88f001-combined-ca-bundle\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.946164 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a2ec979-4e84-42ce-9299-8b9f5d88f001-logs\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.946201 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4a2ec979-4e84-42ce-9299-8b9f5d88f001-scripts\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.946222 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4a2ec979-4e84-42ce-9299-8b9f5d88f001-config-data\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.947287 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4a2ec979-4e84-42ce-9299-8b9f5d88f001-scripts\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.947623 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a2ec979-4e84-42ce-9299-8b9f5d88f001-logs\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.947962 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4a2ec979-4e84-42ce-9299-8b9f5d88f001-config-data\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.949662 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a2ec979-4e84-42ce-9299-8b9f5d88f001-horizon-tls-certs\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.950320 4871 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a2ec979-4e84-42ce-9299-8b9f5d88f001-combined-ca-bundle\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.950676 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4a2ec979-4e84-42ce-9299-8b9f5d88f001-horizon-secret-key\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.969245 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpklh\" (UniqueName: \"kubernetes.io/projected/4a2ec979-4e84-42ce-9299-8b9f5d88f001-kube-api-access-rpklh\") pod \"horizon-7cbf6bc784-rm6hn\" (UID: \"4a2ec979-4e84-42ce-9299-8b9f5d88f001\") " pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:46 crc kubenswrapper[4871]: I1126 05:43:46.970117 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:43:47 crc kubenswrapper[4871]: I1126 05:43:47.070950 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:43:48 crc kubenswrapper[4871]: I1126 05:43:48.603740 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-578598f949-zhj9v" Nov 26 05:43:48 crc kubenswrapper[4871]: I1126 05:43:48.664008 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55b99bf79c-8q6qq"] Nov 26 05:43:48 crc kubenswrapper[4871]: I1126 05:43:48.664214 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" podUID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerName="dnsmasq-dns" containerID="cri-o://4b3f7ef4dd996794249355cfc7b0eae92d7ebc4d4d9080ced156cb574b241e25" gracePeriod=10 Nov 26 05:43:49 crc kubenswrapper[4871]: I1126 05:43:49.749709 4871 generic.go:334] "Generic (PLEG): container finished" podID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerID="4b3f7ef4dd996794249355cfc7b0eae92d7ebc4d4d9080ced156cb574b241e25" exitCode=0 Nov 26 05:43:49 crc kubenswrapper[4871]: I1126 05:43:49.749781 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" event={"ID":"bc4bffe3-86fd-478d-95cf-edf716bbf3f2","Type":"ContainerDied","Data":"4b3f7ef4dd996794249355cfc7b0eae92d7ebc4d4d9080ced156cb574b241e25"} Nov 26 05:43:53 crc kubenswrapper[4871]: I1126 05:43:53.614793 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:43:53 crc kubenswrapper[4871]: I1126 05:43:53.615314 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:43:53 crc kubenswrapper[4871]: I1126 05:43:53.633507 4871 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" podUID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: connect: connection refused" Nov 26 05:43:55 crc kubenswrapper[4871]: E1126 05:43:55.825971 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-horizon:current" Nov 26 05:43:55 crc kubenswrapper[4871]: E1126 05:43:55.826606 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-horizon:current" Nov 26 05:43:55 crc kubenswrapper[4871]: E1126 05:43:55.826793 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.rdoproject.org/podified-master-centos10/openstack-horizon:current,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd4hd7h65fh9chb6h599h58fhffh57fh96h86hfch675h558h55hbfh5dh78hd9h585h689h68dh88h64bh675h555h694h5ffh66h6bh547h5b6q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z6l9g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-65fb6dbd77-xt6d5_openstack(dbb025f2-31cc-41c2-886d-4cd68ed17d44): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:43:55 crc kubenswrapper[4871]: E1126 05:43:55.829845 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-horizon:current\\\"\"]" pod="openstack/horizon-65fb6dbd77-xt6d5" podUID="dbb025f2-31cc-41c2-886d-4cd68ed17d44" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.189415 
4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-placement-api:current" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.189502 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-placement-api:current" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.189682 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-placement-api:current,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hm6dx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-6z5bf_openstack(450179f7-baf0-481d-ad0e-4d3534ee28f4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.192760 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-6z5bf" podUID="450179f7-baf0-481d-ad0e-4d3534ee28f4" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.206321 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-horizon:current" Nov 26 05:43:57 crc 
kubenswrapper[4871]: E1126 05:43:57.206401 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-horizon:current" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.206589 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.rdoproject.org/podified-master-centos10/openstack-horizon:current,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd5h6bhfh65bhf5h5bfhb6h57bh59h5bchcbh5ffh649h694h5cfh5bdh5d4h684h557hd9h577h574h88h5dch688hd9h64ch57h669h6h59ch658q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cn8cb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-7f4dbc4c7c-tkmkl_openstack(3f3f046f-4c4d-4c85-9d61-043c3006ea05): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.219837 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-horizon:current\\\"\"]" pod="openstack/horizon-7f4dbc4c7c-tkmkl" podUID="3f3f046f-4c4d-4c85-9d61-043c3006ea05" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.297838 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gzt89" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.459043 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-config-data\") pod \"d4fc7db5-083a-4a96-ac48-081df4923f57\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.459115 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-fernet-keys\") pod \"d4fc7db5-083a-4a96-ac48-081df4923f57\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.459170 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-credential-keys\") pod \"d4fc7db5-083a-4a96-ac48-081df4923f57\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.459238 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wk449\" (UniqueName: \"kubernetes.io/projected/d4fc7db5-083a-4a96-ac48-081df4923f57-kube-api-access-wk449\") pod \"d4fc7db5-083a-4a96-ac48-081df4923f57\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.459284 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-scripts\") pod \"d4fc7db5-083a-4a96-ac48-081df4923f57\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.459346 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-combined-ca-bundle\") pod \"d4fc7db5-083a-4a96-ac48-081df4923f57\" (UID: \"d4fc7db5-083a-4a96-ac48-081df4923f57\") " Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.466885 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "d4fc7db5-083a-4a96-ac48-081df4923f57" (UID: "d4fc7db5-083a-4a96-ac48-081df4923f57"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.467144 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-scripts" (OuterVolumeSpecName: "scripts") pod "d4fc7db5-083a-4a96-ac48-081df4923f57" (UID: "d4fc7db5-083a-4a96-ac48-081df4923f57"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.467329 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d4fc7db5-083a-4a96-ac48-081df4923f57" (UID: "d4fc7db5-083a-4a96-ac48-081df4923f57"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.467924 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4fc7db5-083a-4a96-ac48-081df4923f57-kube-api-access-wk449" (OuterVolumeSpecName: "kube-api-access-wk449") pod "d4fc7db5-083a-4a96-ac48-081df4923f57" (UID: "d4fc7db5-083a-4a96-ac48-081df4923f57"). InnerVolumeSpecName "kube-api-access-wk449". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.495361 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d4fc7db5-083a-4a96-ac48-081df4923f57" (UID: "d4fc7db5-083a-4a96-ac48-081df4923f57"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.504742 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-config-data" (OuterVolumeSpecName: "config-data") pod "d4fc7db5-083a-4a96-ac48-081df4923f57" (UID: "d4fc7db5-083a-4a96-ac48-081df4923f57"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.562841 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.562878 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.562888 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.562897 4871 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.562906 4871 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d4fc7db5-083a-4a96-ac48-081df4923f57-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.562915 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wk449\" (UniqueName: \"kubernetes.io/projected/d4fc7db5-083a-4a96-ac48-081df4923f57-kube-api-access-wk449\") on node \"crc\" DevicePath \"\"" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.780160 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-barbican-api:current" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.780214 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-barbican-api:current" Nov 26 05:43:57 crc 
kubenswrapper[4871]: E1126 05:43:57.780328 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-barbican-api:current,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ztplr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-lwfp5_openstack(e72bbf93-367f-4207-b846-b9cf819b9b4c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.781481 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-lwfp5" podUID="e72bbf93-367f-4207-b846-b9cf819b9b4c" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.821619 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gzt89" event={"ID":"d4fc7db5-083a-4a96-ac48-081df4923f57","Type":"ContainerDied","Data":"0155f0a0c67e8299ae5b41f611dd0f641db02eb05d0ff103f720572a353a2cf2"} Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.821674 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gzt89" Nov 26 05:43:57 crc kubenswrapper[4871]: I1126 05:43:57.821727 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0155f0a0c67e8299ae5b41f611dd0f641db02eb05d0ff103f720572a353a2cf2" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.832120 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-barbican-api:current\\\"\"" pod="openstack/barbican-db-sync-lwfp5" podUID="e72bbf93-367f-4207-b846-b9cf819b9b4c" Nov 26 05:43:57 crc kubenswrapper[4871]: E1126 05:43:57.832361 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-placement-api:current\\\"\"" pod="openstack/placement-db-sync-6z5bf" podUID="450179f7-baf0-481d-ad0e-4d3534ee28f4" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.483241 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-gzt89"] Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.492262 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-gzt89"] Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.518943 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4fc7db5-083a-4a96-ac48-081df4923f57" path="/var/lib/kubelet/pods/d4fc7db5-083a-4a96-ac48-081df4923f57/volumes" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.583215 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-kkjjm"] Nov 26 05:43:58 crc kubenswrapper[4871]: E1126 05:43:58.583625 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4fc7db5-083a-4a96-ac48-081df4923f57" containerName="keystone-bootstrap" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.583644 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4fc7db5-083a-4a96-ac48-081df4923f57" containerName="keystone-bootstrap" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.583833 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4fc7db5-083a-4a96-ac48-081df4923f57" containerName="keystone-bootstrap" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.584403 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.589409 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.589757 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.590330 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-w7bjw" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.590492 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.590657 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.594508 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-kkjjm"] Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.687518 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-combined-ca-bundle\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.688074 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-fernet-keys\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.688114 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-credential-keys\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.688149 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-config-data\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.688173 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgbq2\" (UniqueName: \"kubernetes.io/projected/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-kube-api-access-bgbq2\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.688195 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-scripts\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.790330 4871 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-combined-ca-bundle\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.791377 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-fernet-keys\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.791784 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-credential-keys\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.791940 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-config-data\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.792272 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgbq2\" (UniqueName: \"kubernetes.io/projected/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-kube-api-access-bgbq2\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.792317 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-scripts\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.795868 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-combined-ca-bundle\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.796951 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-scripts\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.797005 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-credential-keys\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.799935 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-fernet-keys\") pod \"keystone-bootstrap-kkjjm\" (UID: 
\"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.805037 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-config-data\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.807855 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgbq2\" (UniqueName: \"kubernetes.io/projected/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-kube-api-access-bgbq2\") pod \"keystone-bootstrap-kkjjm\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:43:58 crc kubenswrapper[4871]: I1126 05:43:58.910484 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:44:00 crc kubenswrapper[4871]: I1126 05:44:00.861751 4871 generic.go:334] "Generic (PLEG): container finished" podID="977ad0ca-daf1-4b9d-b75a-c697ff3239c2" containerID="0d7d4bcebf3e501276514ff5ba7838d4951fc3f90c68c7d6b24ffd745bdf19a6" exitCode=0 Nov 26 05:44:00 crc kubenswrapper[4871]: I1126 05:44:00.862111 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-kvkr5" event={"ID":"977ad0ca-daf1-4b9d-b75a-c697ff3239c2","Type":"ContainerDied","Data":"0d7d4bcebf3e501276514ff5ba7838d4951fc3f90c68c7d6b24ffd745bdf19a6"} Nov 26 05:44:03 crc kubenswrapper[4871]: I1126 05:44:03.634251 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" podUID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: i/o timeout" Nov 26 05:44:04 crc kubenswrapper[4871]: E1126 05:44:04.662418 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-glance-api:current" Nov 26 05:44:04 crc kubenswrapper[4871]: E1126 05:44:04.662470 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-glance-api:current" Nov 26 05:44:04 crc kubenswrapper[4871]: E1126 05:44:04.662648 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-glance-api:current,Command:[/bin/bash],Args:[-c 
Nov 26 05:44:04 crc kubenswrapper[4871]: E1126 05:44:04.663892 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-9kf7b" podUID="b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6"
Nov 26 05:44:04 crc kubenswrapper[4871]: E1126 05:44:04.904881 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-glance-api:current\\\"\"" pod="openstack/glance-db-sync-9kf7b" podUID="b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6"
Nov 26 05:44:08 crc kubenswrapper[4871]: I1126 05:44:08.635876 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" podUID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: i/o timeout"
Nov 26 05:44:08 crc kubenswrapper[4871]: I1126 05:44:08.636831 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:44:12 crc kubenswrapper[4871]: I1126 05:44:12.979729 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" event={"ID":"bc4bffe3-86fd-478d-95cf-edf716bbf3f2","Type":"ContainerDied","Data":"8df039fbccdcda8291f39b42da1aa2ac67afbe2d4dd352237156f98ce5923d74"}
Nov 26 05:44:12 crc kubenswrapper[4871]: I1126 05:44:12.980190 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8df039fbccdcda8291f39b42da1aa2ac67afbe2d4dd352237156f98ce5923d74"
Nov 26 05:44:12 crc kubenswrapper[4871]: I1126 05:44:12.981424 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f4dbc4c7c-tkmkl" event={"ID":"3f3f046f-4c4d-4c85-9d61-043c3006ea05","Type":"ContainerDied","Data":"94511934e403a878b30b92d89cd60c557bb560f9ecb88f1050f70408638746b0"}
Nov 26 05:44:12 crc kubenswrapper[4871]: I1126 05:44:12.981449 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94511934e403a878b30b92d89cd60c557bb560f9ecb88f1050f70408638746b0"
Nov 26 05:44:12 crc kubenswrapper[4871]: I1126 05:44:12.982606 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-65fb6dbd77-xt6d5" event={"ID":"dbb025f2-31cc-41c2-886d-4cd68ed17d44","Type":"ContainerDied","Data":"1ff6ae06d4caebbdfcb99e13793c5660c7896a91d9d6daa7fa54fe792680fda4"}
Nov 26 05:44:12 crc kubenswrapper[4871]: I1126 05:44:12.982626 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ff6ae06d4caebbdfcb99e13793c5660c7896a91d9d6daa7fa54fe792680fda4"
Nov 26 05:44:12 crc kubenswrapper[4871]: I1126 05:44:12.985394 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-kvkr5" event={"ID":"977ad0ca-daf1-4b9d-b75a-c697ff3239c2","Type":"ContainerDied","Data":"105fb12375adcce0f56a236d7c131a522c5f953649273a5892af901e95a09e2d"}
Nov 26 05:44:12 crc kubenswrapper[4871]: I1126 05:44:12.985444 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="105fb12375adcce0f56a236d7c131a522c5f953649273a5892af901e95a09e2d"
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.004952 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-65fb6dbd77-xt6d5"
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.012621 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq"
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.025308 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f4dbc4c7c-tkmkl"
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.031740 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-kvkr5"
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171228 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f3f046f-4c4d-4c85-9d61-043c3006ea05-logs\") pod \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171266 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3f3f046f-4c4d-4c85-9d61-043c3006ea05-horizon-secret-key\") pod \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171285 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dbb025f2-31cc-41c2-886d-4cd68ed17d44-horizon-secret-key\") pod \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171306 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-config\") pod \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171328 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vwf6\" (UniqueName: \"kubernetes.io/projected/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-kube-api-access-4vwf6\") pod \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171358 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-config\") pod \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171499 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-scripts\") pod \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171568 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6l9g\" (UniqueName: \"kubernetes.io/projected/dbb025f2-31cc-41c2-886d-4cd68ed17d44-kube-api-access-z6l9g\") pod \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171619 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-combined-ca-bundle\") pod \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171622 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f3f046f-4c4d-4c85-9d61-043c3006ea05-logs" (OuterVolumeSpecName: "logs") pod "3f3f046f-4c4d-4c85-9d61-043c3006ea05" (UID: "3f3f046f-4c4d-4c85-9d61-043c3006ea05"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171693 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-nb\") pod \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171721 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-swift-storage-0\") pod \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171774 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-config-data\") pod \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171794 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-config-data\") pod \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171814 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4flf\" (UniqueName: \"kubernetes.io/projected/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-kube-api-access-f4flf\") pod \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\" (UID: \"977ad0ca-daf1-4b9d-b75a-c697ff3239c2\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171855 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-sb\") pod \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171922 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-svc\") pod \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\" (UID: \"bc4bffe3-86fd-478d-95cf-edf716bbf3f2\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171947 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cn8cb\" (UniqueName: \"kubernetes.io/projected/3f3f046f-4c4d-4c85-9d61-043c3006ea05-kube-api-access-cn8cb\") pod \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.171969 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-scripts\") pod \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\" (UID: \"3f3f046f-4c4d-4c85-9d61-043c3006ea05\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.172012 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbb025f2-31cc-41c2-886d-4cd68ed17d44-logs\") pod \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\" (UID: \"dbb025f2-31cc-41c2-886d-4cd68ed17d44\") "
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.172155 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-scripts" (OuterVolumeSpecName: "scripts") pod "dbb025f2-31cc-41c2-886d-4cd68ed17d44" (UID: "dbb025f2-31cc-41c2-886d-4cd68ed17d44"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.172822 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-config-data" (OuterVolumeSpecName: "config-data") pod "3f3f046f-4c4d-4c85-9d61-043c3006ea05" (UID: "3f3f046f-4c4d-4c85-9d61-043c3006ea05"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.172982 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f3f046f-4c4d-4c85-9d61-043c3006ea05-logs\") on node \"crc\" DevicePath \"\""
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.173027 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.173042 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.175408 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbb025f2-31cc-41c2-886d-4cd68ed17d44-logs" (OuterVolumeSpecName: "logs") pod "dbb025f2-31cc-41c2-886d-4cd68ed17d44" (UID: "dbb025f2-31cc-41c2-886d-4cd68ed17d44"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.175454 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-scripts" (OuterVolumeSpecName: "scripts") pod "3f3f046f-4c4d-4c85-9d61-043c3006ea05" (UID: "3f3f046f-4c4d-4c85-9d61-043c3006ea05"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.175638 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-config-data" (OuterVolumeSpecName: "config-data") pod "dbb025f2-31cc-41c2-886d-4cd68ed17d44" (UID: "dbb025f2-31cc-41c2-886d-4cd68ed17d44"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.179300 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3f046f-4c4d-4c85-9d61-043c3006ea05-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "3f3f046f-4c4d-4c85-9d61-043c3006ea05" (UID: "3f3f046f-4c4d-4c85-9d61-043c3006ea05"). InnerVolumeSpecName "horizon-secret-key".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.191099 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbb025f2-31cc-41c2-886d-4cd68ed17d44-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "dbb025f2-31cc-41c2-886d-4cd68ed17d44" (UID: "dbb025f2-31cc-41c2-886d-4cd68ed17d44"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.191131 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-kube-api-access-4vwf6" (OuterVolumeSpecName: "kube-api-access-4vwf6") pod "bc4bffe3-86fd-478d-95cf-edf716bbf3f2" (UID: "bc4bffe3-86fd-478d-95cf-edf716bbf3f2"). InnerVolumeSpecName "kube-api-access-4vwf6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.191176 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f3f046f-4c4d-4c85-9d61-043c3006ea05-kube-api-access-cn8cb" (OuterVolumeSpecName: "kube-api-access-cn8cb") pod "3f3f046f-4c4d-4c85-9d61-043c3006ea05" (UID: "3f3f046f-4c4d-4c85-9d61-043c3006ea05"). InnerVolumeSpecName "kube-api-access-cn8cb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.191196 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-kube-api-access-f4flf" (OuterVolumeSpecName: "kube-api-access-f4flf") pod "977ad0ca-daf1-4b9d-b75a-c697ff3239c2" (UID: "977ad0ca-daf1-4b9d-b75a-c697ff3239c2"). InnerVolumeSpecName "kube-api-access-f4flf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.191211 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbb025f2-31cc-41c2-886d-4cd68ed17d44-kube-api-access-z6l9g" (OuterVolumeSpecName: "kube-api-access-z6l9g") pod "dbb025f2-31cc-41c2-886d-4cd68ed17d44" (UID: "dbb025f2-31cc-41c2-886d-4cd68ed17d44"). InnerVolumeSpecName "kube-api-access-z6l9g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.203609 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-config" (OuterVolumeSpecName: "config") pod "977ad0ca-daf1-4b9d-b75a-c697ff3239c2" (UID: "977ad0ca-daf1-4b9d-b75a-c697ff3239c2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.207135 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "977ad0ca-daf1-4b9d-b75a-c697ff3239c2" (UID: "977ad0ca-daf1-4b9d-b75a-c697ff3239c2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.221189 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bc4bffe3-86fd-478d-95cf-edf716bbf3f2" (UID: "bc4bffe3-86fd-478d-95cf-edf716bbf3f2"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.223348 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bc4bffe3-86fd-478d-95cf-edf716bbf3f2" (UID: "bc4bffe3-86fd-478d-95cf-edf716bbf3f2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.224836 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-config" (OuterVolumeSpecName: "config") pod "bc4bffe3-86fd-478d-95cf-edf716bbf3f2" (UID: "bc4bffe3-86fd-478d-95cf-edf716bbf3f2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.226735 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bc4bffe3-86fd-478d-95cf-edf716bbf3f2" (UID: "bc4bffe3-86fd-478d-95cf-edf716bbf3f2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.242857 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bc4bffe3-86fd-478d-95cf-edf716bbf3f2" (UID: "bc4bffe3-86fd-478d-95cf-edf716bbf3f2"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274410 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dbb025f2-31cc-41c2-886d-4cd68ed17d44-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274443 4871 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3f3f046f-4c4d-4c85-9d61-043c3006ea05-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274454 4871 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dbb025f2-31cc-41c2-886d-4cd68ed17d44-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274463 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274472 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vwf6\" (UniqueName: \"kubernetes.io/projected/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-kube-api-access-4vwf6\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274482 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274490 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6l9g\" (UniqueName: \"kubernetes.io/projected/dbb025f2-31cc-41c2-886d-4cd68ed17d44-kube-api-access-z6l9g\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274499 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274508 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274515 4871 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274539 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dbb025f2-31cc-41c2-886d-4cd68ed17d44-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274548 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4flf\" (UniqueName: \"kubernetes.io/projected/977ad0ca-daf1-4b9d-b75a-c697ff3239c2-kube-api-access-f4flf\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274557 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 
05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274567 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc4bffe3-86fd-478d-95cf-edf716bbf3f2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274575 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cn8cb\" (UniqueName: \"kubernetes.io/projected/3f3f046f-4c4d-4c85-9d61-043c3006ea05-kube-api-access-cn8cb\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.274583 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f3f046f-4c4d-4c85-9d61-043c3006ea05-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.637134 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" podUID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.137:5353: i/o timeout" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.992749 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-65fb6dbd77-xt6d5" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.992773 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f4dbc4c7c-tkmkl" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.992785 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-kvkr5" Nov 26 05:44:13 crc kubenswrapper[4871]: I1126 05:44:13.992753 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55b99bf79c-8q6qq" Nov 26 05:44:14 crc kubenswrapper[4871]: E1126 05:44:14.061557 4871 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-cinder-api:current" Nov 26 05:44:14 crc kubenswrapper[4871]: E1126 05:44:14.061621 4871 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-cinder-api:current" Nov 26 05:44:14 crc kubenswrapper[4871]: E1126 05:44:14.062317 4871 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-cinder-api:current,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w9dnr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-7pzbd_openstack(c8bdb9c7-91c3-40dc-920e-6e333b18f331): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 05:44:14 crc kubenswrapper[4871]: E1126 05:44:14.063519 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-7pzbd" podUID="c8bdb9c7-91c3-40dc-920e-6e333b18f331" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.373393 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f4dbc4c7c-tkmkl"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.416427 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7f4dbc4c7c-tkmkl"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.438868 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7648c6b969-hsbsf"] Nov 26 05:44:14 crc kubenswrapper[4871]: E1126 05:44:14.439201 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerName="dnsmasq-dns" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.439215 4871 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerName="dnsmasq-dns" Nov 26 05:44:14 crc kubenswrapper[4871]: E1126 05:44:14.439226 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerName="init" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.439232 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerName="init" Nov 26 05:44:14 crc kubenswrapper[4871]: E1126 05:44:14.439267 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="977ad0ca-daf1-4b9d-b75a-c697ff3239c2" containerName="neutron-db-sync" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.439273 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="977ad0ca-daf1-4b9d-b75a-c697ff3239c2" containerName="neutron-db-sync" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.439433 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" containerName="dnsmasq-dns" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.439447 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="977ad0ca-daf1-4b9d-b75a-c697ff3239c2" containerName="neutron-db-sync" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.442197 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.482576 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7648c6b969-hsbsf"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.491280 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-65fb6dbd77-xt6d5"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.525440 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f3f046f-4c4d-4c85-9d61-043c3006ea05" path="/var/lib/kubelet/pods/3f3f046f-4c4d-4c85-9d61-043c3006ea05/volumes" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.526535 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-65fb6dbd77-xt6d5"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.526561 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-69c5fdfb8b-95x78"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.529604 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55b99bf79c-8q6qq"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.529701 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.532748 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-xhmrf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.532918 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.533038 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.533157 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.538458 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55b99bf79c-8q6qq"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.550101 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-69c5fdfb8b-95x78"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.604274 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-svc\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.605041 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cj7s8\" (UniqueName: \"kubernetes.io/projected/196ce4b4-28af-4295-b20e-4d1cfa847b27-kube-api-access-cj7s8\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.605367 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-swift-storage-0\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.605723 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-sb\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.616601 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-config\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.616714 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-nb\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 
05:44:14.709182 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8665945b44-wbcwv"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.718002 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.718913 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-combined-ca-bundle\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.718982 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-sb\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.719006 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-config\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.719024 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-httpd-config\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.719043 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-config\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.719065 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-nb\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.719116 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx8dc\" (UniqueName: \"kubernetes.io/projected/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-kube-api-access-vx8dc\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.719756 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-svc\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.719795 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cj7s8\" 
(UniqueName: \"kubernetes.io/projected/196ce4b4-28af-4295-b20e-4d1cfa847b27-kube-api-access-cj7s8\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.719832 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-ovndb-tls-certs\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.719901 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-swift-storage-0\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.720635 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-nb\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.720727 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-config\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.720787 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-swift-storage-0\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.721229 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-svc\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.722298 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-sb\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.750412 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cj7s8\" (UniqueName: \"kubernetes.io/projected/196ce4b4-28af-4295-b20e-4d1cfa847b27-kube-api-access-cj7s8\") pod \"dnsmasq-dns-7648c6b969-hsbsf\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.789977 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.821558 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-combined-ca-bundle\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.821654 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-httpd-config\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.823283 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-config\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.823325 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx8dc\" (UniqueName: \"kubernetes.io/projected/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-kube-api-access-vx8dc\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.823461 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-ovndb-tls-certs\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.830021 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-httpd-config\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.836741 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-config\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.836879 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-combined-ca-bundle\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.843060 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-ovndb-tls-certs\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.851081 4871 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-vx8dc\" (UniqueName: \"kubernetes.io/projected/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-kube-api-access-vx8dc\") pod \"neutron-69c5fdfb8b-95x78\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.867170 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.967892 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 26 05:44:14 crc kubenswrapper[4871]: I1126 05:44:14.977606 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7cbf6bc784-rm6hn"] Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.046507 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-kkjjm"] Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.121984 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8665945b44-wbcwv" event={"ID":"812fa0f1-c216-4db1-b3e6-cfa862b8cb93","Type":"ContainerStarted","Data":"2f753b791176f79d2770b9ecafcbce795dfc8ecb07673c67a3c4df5b21ef9c16"} Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.122029 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8665945b44-wbcwv" event={"ID":"812fa0f1-c216-4db1-b3e6-cfa862b8cb93","Type":"ContainerStarted","Data":"157a44f46232c0458ca98556cb193e81b3368d1f99c8c97c9fd4a0bbe2a2a9a7"} Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.132572 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.144681 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"711348b2-05b6-4d20-8eea-c2e19c4dc949","Type":"ContainerStarted","Data":"5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f"} Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.144732 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"711348b2-05b6-4d20-8eea-c2e19c4dc949","Type":"ContainerStarted","Data":"96eece22e26aec4e3813ce1d6a3b2eccd59f1f708317e747c6c63dca29c50b88"} Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.174890 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kkjjm" event={"ID":"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e","Type":"ContainerStarted","Data":"3ea3ecce11cb20df2f874230ffe699b89a37309508cf5064bab7cc2d252daf16"} Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.178506 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7cbf6bc784-rm6hn" event={"ID":"4a2ec979-4e84-42ce-9299-8b9f5d88f001","Type":"ContainerStarted","Data":"edb7e72f6d2b3678c4cd2683f95ffb17fb80260baaa242def51c14cb179a38b6"} Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.185693 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"e939bb2f-dadb-4353-8845-f31c42b87a75","Type":"ContainerStarted","Data":"61aecd061c0825553cca8d0f07e2655399d32f9d9e43c6af30787f542aae3cde"} Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.206486 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b724414-8682-4e73-8b2d-305fce381613","Type":"ContainerStarted","Data":"b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb"} Nov 26 05:44:15 crc 
kubenswrapper[4871]: I1126 05:44:15.233277 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f98f67b9-25fgx" event={"ID":"3a392cdb-377e-4047-a1f4-f190429fe076","Type":"ContainerStarted","Data":"ae8ac99537258d7df8927e67f7ad624c60d74e466377d617ef3ff08389e1e3fd"} Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.233375 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f98f67b9-25fgx" event={"ID":"3a392cdb-377e-4047-a1f4-f190429fe076","Type":"ContainerStarted","Data":"f5481d814a3efcc61a3330ac7e4e3e5f3899e1e7c4082c57528613cb9400a3cd"} Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.233624 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-67f98f67b9-25fgx" podUID="3a392cdb-377e-4047-a1f4-f190429fe076" containerName="horizon-log" containerID="cri-o://f5481d814a3efcc61a3330ac7e4e3e5f3899e1e7c4082c57528613cb9400a3cd" gracePeriod=30 Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.234027 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-67f98f67b9-25fgx" podUID="3a392cdb-377e-4047-a1f4-f190429fe076" containerName="horizon" containerID="cri-o://ae8ac99537258d7df8927e67f7ad624c60d74e466377d617ef3ff08389e1e3fd" gracePeriod=30 Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.248364 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-6z5bf" event={"ID":"450179f7-baf0-481d-ad0e-4d3534ee28f4","Type":"ContainerStarted","Data":"7bac7bd9dc077623d52482f17b20a5571b728ffdb858f67e1e4e5e42bda7a265"} Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.260354 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-67f98f67b9-25fgx" podStartSLOduration=3.360380705 podStartE2EDuration="38.26033431s" podCreationTimestamp="2025-11-26 05:43:37 +0000 UTC" firstStartedPulling="2025-11-26 05:43:39.11296776 +0000 UTC m=+1077.296019336" lastFinishedPulling="2025-11-26 05:44:14.012921345 +0000 UTC m=+1112.195972941" observedRunningTime="2025-11-26 05:44:15.256684599 +0000 UTC m=+1113.439736185" watchObservedRunningTime="2025-11-26 05:44:15.26033431 +0000 UTC m=+1113.443385896" Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.283942 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-6z5bf" podStartSLOduration=2.51042851 podStartE2EDuration="37.283922406s" podCreationTimestamp="2025-11-26 05:43:38 +0000 UTC" firstStartedPulling="2025-11-26 05:43:39.326589615 +0000 UTC m=+1077.509641201" lastFinishedPulling="2025-11-26 05:44:14.100083501 +0000 UTC m=+1112.283135097" observedRunningTime="2025-11-26 05:44:15.275836235 +0000 UTC m=+1113.458887821" watchObservedRunningTime="2025-11-26 05:44:15.283922406 +0000 UTC m=+1113.466973992" Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.298677 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-lwfp5" event={"ID":"e72bbf93-367f-4207-b846-b9cf819b9b4c","Type":"ContainerStarted","Data":"bb34da1a086146aa7815a37eef79a6585c3a58342bfd9113c0a6818bf15c248f"} Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.321802 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-lwfp5" podStartSLOduration=3.435362766 podStartE2EDuration="38.321784967s" podCreationTimestamp="2025-11-26 05:43:37 +0000 UTC" firstStartedPulling="2025-11-26 05:43:39.326516674 +0000 UTC m=+1077.509568260" 
lastFinishedPulling="2025-11-26 05:44:14.212938885 +0000 UTC m=+1112.395990461" observedRunningTime="2025-11-26 05:44:15.312915396 +0000 UTC m=+1113.495966982" watchObservedRunningTime="2025-11-26 05:44:15.321784967 +0000 UTC m=+1113.504836553" Nov 26 05:44:15 crc kubenswrapper[4871]: E1126 05:44:15.324031 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-cinder-api:current\\\"\"" pod="openstack/cinder-db-sync-7pzbd" podUID="c8bdb9c7-91c3-40dc-920e-6e333b18f331" Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.432336 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7648c6b969-hsbsf"] Nov 26 05:44:15 crc kubenswrapper[4871]: W1126 05:44:15.486600 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod196ce4b4_28af_4295_b20e_4d1cfa847b27.slice/crio-dad594202630ab8d8490f9f9340ba70bd5980a82244816242ad725613b307b8b WatchSource:0}: Error finding container dad594202630ab8d8490f9f9340ba70bd5980a82244816242ad725613b307b8b: Status 404 returned error can't find the container with id dad594202630ab8d8490f9f9340ba70bd5980a82244816242ad725613b307b8b Nov 26 05:44:15 crc kubenswrapper[4871]: I1126 05:44:15.694146 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-69c5fdfb8b-95x78"] Nov 26 05:44:15 crc kubenswrapper[4871]: W1126 05:44:15.752394 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1d07d92e_a5ac_479d_b6e3_2e175c5a6fda.slice/crio-f6c3d3ac236b7d899f6a7270442b6fb076af5b98120629a770ff845dd597dd59 WatchSource:0}: Error finding container f6c3d3ac236b7d899f6a7270442b6fb076af5b98120629a770ff845dd597dd59: Status 404 returned error can't find the container with id f6c3d3ac236b7d899f6a7270442b6fb076af5b98120629a770ff845dd597dd59 Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.330114 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"711348b2-05b6-4d20-8eea-c2e19c4dc949","Type":"ContainerStarted","Data":"31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee"} Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.330485 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.342788 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"906807e1-f724-4ab4-9ccc-95656188890e","Type":"ContainerStarted","Data":"465e559dca887fb0305a03f78054201387518d7c17dc6ea0b8151956c786aba2"} Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.349771 4871 generic.go:334] "Generic (PLEG): container finished" podID="196ce4b4-28af-4295-b20e-4d1cfa847b27" containerID="11e4db97f8d04172acb4cb8165ff42f9de1bd3ca643bed403f6ecd7b4a7362b5" exitCode=0 Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.349909 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" event={"ID":"196ce4b4-28af-4295-b20e-4d1cfa847b27","Type":"ContainerDied","Data":"11e4db97f8d04172acb4cb8165ff42f9de1bd3ca643bed403f6ecd7b4a7362b5"} Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.349972 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" 
event={"ID":"196ce4b4-28af-4295-b20e-4d1cfa847b27","Type":"ContainerStarted","Data":"dad594202630ab8d8490f9f9340ba70bd5980a82244816242ad725613b307b8b"} Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.366856 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kkjjm" event={"ID":"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e","Type":"ContainerStarted","Data":"7ac7ebd344eab8892ece8a92a1b84eb468249911332dc50689e62648d49b49c7"} Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.368408 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=33.368391571 podStartE2EDuration="33.368391571s" podCreationTimestamp="2025-11-26 05:43:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:16.345634776 +0000 UTC m=+1114.528686362" watchObservedRunningTime="2025-11-26 05:44:16.368391571 +0000 UTC m=+1114.551443157" Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.381007 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7cbf6bc784-rm6hn" event={"ID":"4a2ec979-4e84-42ce-9299-8b9f5d88f001","Type":"ContainerStarted","Data":"fcd72689b6654bdd982bdce1e63525f2f58a6a2e64f4a4fc737548130d68b840"} Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.381057 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7cbf6bc784-rm6hn" event={"ID":"4a2ec979-4e84-42ce-9299-8b9f5d88f001","Type":"ContainerStarted","Data":"d0544269fd1e09110185d29e279719c2544edf741f688f6ee1eb277164a31f9f"} Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.396793 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69c5fdfb8b-95x78" event={"ID":"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda","Type":"ContainerStarted","Data":"f6c3d3ac236b7d899f6a7270442b6fb076af5b98120629a770ff845dd597dd59"} Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.399812 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8665945b44-wbcwv" event={"ID":"812fa0f1-c216-4db1-b3e6-cfa862b8cb93","Type":"ContainerStarted","Data":"c136384f154b1d112425776e2afc3e7ec4248dce8ee44c4fa5505218badfb75c"} Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.402479 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-kkjjm" podStartSLOduration=18.402439567 podStartE2EDuration="18.402439567s" podCreationTimestamp="2025-11-26 05:43:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:16.395032833 +0000 UTC m=+1114.578084419" watchObservedRunningTime="2025-11-26 05:44:16.402439567 +0000 UTC m=+1114.585491163" Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.430168 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7cbf6bc784-rm6hn" podStartSLOduration=30.430148946 podStartE2EDuration="30.430148946s" podCreationTimestamp="2025-11-26 05:43:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:16.412893447 +0000 UTC m=+1114.595945073" watchObservedRunningTime="2025-11-26 05:44:16.430148946 +0000 UTC m=+1114.613200532" Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.448275 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/horizon-8665945b44-wbcwv" podStartSLOduration=30.448257596 podStartE2EDuration="30.448257596s" podCreationTimestamp="2025-11-26 05:43:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:16.435849338 +0000 UTC m=+1114.618900924" watchObservedRunningTime="2025-11-26 05:44:16.448257596 +0000 UTC m=+1114.631309182" Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.523488 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc4bffe3-86fd-478d-95cf-edf716bbf3f2" path="/var/lib/kubelet/pods/bc4bffe3-86fd-478d-95cf-edf716bbf3f2/volumes" Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.531234 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbb025f2-31cc-41c2-886d-4cd68ed17d44" path="/var/lib/kubelet/pods/dbb025f2-31cc-41c2-886d-4cd68ed17d44/volumes" Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.971254 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:44:16 crc kubenswrapper[4871]: I1126 05:44:16.971502 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.071781 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.071812 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.378139 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5566bf8457-7qhhj"] Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.380302 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.386074 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.395245 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.402257 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5566bf8457-7qhhj"] Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.402294 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-public-tls-certs\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.402335 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glbsm\" (UniqueName: \"kubernetes.io/projected/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-kube-api-access-glbsm\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.402375 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-config\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.402509 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-httpd-config\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.402584 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-combined-ca-bundle\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.402708 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-ovndb-tls-certs\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.402773 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-internal-tls-certs\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.419794 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69c5fdfb8b-95x78" 
event={"ID":"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda","Type":"ContainerStarted","Data":"6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1"} Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.429933 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" event={"ID":"196ce4b4-28af-4295-b20e-4d1cfa847b27","Type":"ContainerStarted","Data":"6a1be64c982f17ed763f56f7483ff8f43ad84ec11a54f1b3259d818e95bd917b"} Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.431501 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.449695 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" podStartSLOduration=3.449674887 podStartE2EDuration="3.449674887s" podCreationTimestamp="2025-11-26 05:44:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:17.448975289 +0000 UTC m=+1115.632026875" watchObservedRunningTime="2025-11-26 05:44:17.449674887 +0000 UTC m=+1115.632726473" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.505131 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-public-tls-certs\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.505224 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glbsm\" (UniqueName: \"kubernetes.io/projected/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-kube-api-access-glbsm\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.505298 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-config\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.505372 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-httpd-config\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.505405 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-combined-ca-bundle\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.505548 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-ovndb-tls-certs\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 
05:44:17.505605 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-internal-tls-certs\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.534081 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-public-tls-certs\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.534797 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-internal-tls-certs\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.538705 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glbsm\" (UniqueName: \"kubernetes.io/projected/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-kube-api-access-glbsm\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.544388 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-httpd-config\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.546328 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-config\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.556367 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-ovndb-tls-certs\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.562516 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dc2c737-ebec-4a5a-b06b-ffc355fb0a77-combined-ca-bundle\") pod \"neutron-5566bf8457-7qhhj\" (UID: \"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77\") " pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:17 crc kubenswrapper[4871]: I1126 05:44:17.709945 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:18 crc kubenswrapper[4871]: I1126 05:44:18.307996 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-67f98f67b9-25fgx" Nov 26 05:44:19 crc kubenswrapper[4871]: I1126 05:44:19.311756 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 26 05:44:19 crc kubenswrapper[4871]: I1126 05:44:19.312074 4871 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 05:44:21 crc kubenswrapper[4871]: I1126 05:44:21.371850 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 05:44:22 crc kubenswrapper[4871]: I1126 05:44:22.759345 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 26 05:44:23 crc kubenswrapper[4871]: I1126 05:44:23.614616 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:44:23 crc kubenswrapper[4871]: I1126 05:44:23.615242 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:44:23 crc kubenswrapper[4871]: I1126 05:44:23.924134 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5566bf8457-7qhhj"] Nov 26 05:44:23 crc kubenswrapper[4871]: W1126 05:44:23.933444 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7dc2c737_ebec_4a5a_b06b_ffc355fb0a77.slice/crio-a453f742ccfeb7d1b4da4f4ae9d792adfed53ccc32eeb0bc46e030013309448a WatchSource:0}: Error finding container a453f742ccfeb7d1b4da4f4ae9d792adfed53ccc32eeb0bc46e030013309448a: Status 404 returned error can't find the container with id a453f742ccfeb7d1b4da4f4ae9d792adfed53ccc32eeb0bc46e030013309448a Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.312334 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.329017 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.552374 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9kf7b" event={"ID":"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6","Type":"ContainerStarted","Data":"5d01001680921a32f895fff185bc882d16b7258d62ddfedd545f48e12e45a0fc"} Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.556245 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"e939bb2f-dadb-4353-8845-f31c42b87a75","Type":"ContainerStarted","Data":"171552e52a7131c31ba48cc3849237dfdcdd697ae95673de2c326af2223a7540"} Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.561027 4871 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b724414-8682-4e73-8b2d-305fce381613","Type":"ContainerStarted","Data":"9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794"} Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.566196 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69c5fdfb8b-95x78" event={"ID":"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda","Type":"ContainerStarted","Data":"4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749"} Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.566939 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.570416 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-9kf7b" podStartSLOduration=3.08749682 podStartE2EDuration="49.570403893s" podCreationTimestamp="2025-11-26 05:43:35 +0000 UTC" firstStartedPulling="2025-11-26 05:43:36.905702237 +0000 UTC m=+1075.088753823" lastFinishedPulling="2025-11-26 05:44:23.38860931 +0000 UTC m=+1121.571660896" observedRunningTime="2025-11-26 05:44:24.569682165 +0000 UTC m=+1122.752733751" watchObservedRunningTime="2025-11-26 05:44:24.570403893 +0000 UTC m=+1122.753455489" Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.574388 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"906807e1-f724-4ab4-9ccc-95656188890e","Type":"ContainerStarted","Data":"e76e1839b021122d94130fc2fe31234ee90db31804ad29bff7d87b4569898f52"} Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.588692 4871 generic.go:334] "Generic (PLEG): container finished" podID="c1ff3641-2ac2-4223-b2e5-c0bd333bec1e" containerID="7ac7ebd344eab8892ece8a92a1b84eb468249911332dc50689e62648d49b49c7" exitCode=0 Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.588774 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kkjjm" event={"ID":"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e","Type":"ContainerDied","Data":"7ac7ebd344eab8892ece8a92a1b84eb468249911332dc50689e62648d49b49c7"} Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.593847 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=33.222138887 podStartE2EDuration="41.593831515s" podCreationTimestamp="2025-11-26 05:43:43 +0000 UTC" firstStartedPulling="2025-11-26 05:44:15.011484507 +0000 UTC m=+1113.194536093" lastFinishedPulling="2025-11-26 05:44:23.383177135 +0000 UTC m=+1121.566228721" observedRunningTime="2025-11-26 05:44:24.58800706 +0000 UTC m=+1122.771058646" watchObservedRunningTime="2025-11-26 05:44:24.593831515 +0000 UTC m=+1122.776883091" Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.598619 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5566bf8457-7qhhj" event={"ID":"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77","Type":"ContainerStarted","Data":"2abddcec12dbfbbb1ba01822dfe676bab883bd36814ef4b3d615546c4f051d85"} Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.598662 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5566bf8457-7qhhj" event={"ID":"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77","Type":"ContainerStarted","Data":"8fd63dc13e8913765ccf2bd4424033ee67b32ff7b7192144d4e60acf481fe880"} Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.598674 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-5566bf8457-7qhhj" event={"ID":"7dc2c737-ebec-4a5a-b06b-ffc355fb0a77","Type":"ContainerStarted","Data":"a453f742ccfeb7d1b4da4f4ae9d792adfed53ccc32eeb0bc46e030013309448a"} Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.616228 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.633041 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-69c5fdfb8b-95x78" podStartSLOduration=10.633018508 podStartE2EDuration="10.633018508s" podCreationTimestamp="2025-11-26 05:44:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:24.605250898 +0000 UTC m=+1122.788302474" watchObservedRunningTime="2025-11-26 05:44:24.633018508 +0000 UTC m=+1122.816070094" Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.671721 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=33.458910821 podStartE2EDuration="41.67170119s" podCreationTimestamp="2025-11-26 05:43:43 +0000 UTC" firstStartedPulling="2025-11-26 05:44:15.171022201 +0000 UTC m=+1113.354073787" lastFinishedPulling="2025-11-26 05:44:23.38381257 +0000 UTC m=+1121.566864156" observedRunningTime="2025-11-26 05:44:24.642860373 +0000 UTC m=+1122.825911959" watchObservedRunningTime="2025-11-26 05:44:24.67170119 +0000 UTC m=+1122.854752776" Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.797657 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.879508 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-578598f949-zhj9v"] Nov 26 05:44:24 crc kubenswrapper[4871]: I1126 05:44:24.879780 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-578598f949-zhj9v" podUID="2788cfc5-005e-4f99-83ac-9011cbe838cf" containerName="dnsmasq-dns" containerID="cri-o://fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec" gracePeriod=10 Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.460109 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-578598f949-zhj9v" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.468212 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-nb\") pod \"2788cfc5-005e-4f99-83ac-9011cbe838cf\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.534112 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2788cfc5-005e-4f99-83ac-9011cbe838cf" (UID: "2788cfc5-005e-4f99-83ac-9011cbe838cf"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.570204 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-sb\") pod \"2788cfc5-005e-4f99-83ac-9011cbe838cf\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.570263 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-swift-storage-0\") pod \"2788cfc5-005e-4f99-83ac-9011cbe838cf\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.570354 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-svc\") pod \"2788cfc5-005e-4f99-83ac-9011cbe838cf\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.570374 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-config\") pod \"2788cfc5-005e-4f99-83ac-9011cbe838cf\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.570410 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5tsq\" (UniqueName: \"kubernetes.io/projected/2788cfc5-005e-4f99-83ac-9011cbe838cf-kube-api-access-v5tsq\") pod \"2788cfc5-005e-4f99-83ac-9011cbe838cf\" (UID: \"2788cfc5-005e-4f99-83ac-9011cbe838cf\") " Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.570766 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.574828 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2788cfc5-005e-4f99-83ac-9011cbe838cf-kube-api-access-v5tsq" (OuterVolumeSpecName: "kube-api-access-v5tsq") pod "2788cfc5-005e-4f99-83ac-9011cbe838cf" (UID: "2788cfc5-005e-4f99-83ac-9011cbe838cf"). InnerVolumeSpecName "kube-api-access-v5tsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.634296 4871 generic.go:334] "Generic (PLEG): container finished" podID="2788cfc5-005e-4f99-83ac-9011cbe838cf" containerID="fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec" exitCode=0 Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.634402 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-578598f949-zhj9v" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.634452 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578598f949-zhj9v" event={"ID":"2788cfc5-005e-4f99-83ac-9011cbe838cf","Type":"ContainerDied","Data":"fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec"} Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.634480 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578598f949-zhj9v" event={"ID":"2788cfc5-005e-4f99-83ac-9011cbe838cf","Type":"ContainerDied","Data":"0871c35cba3d17d4152761d76f60d886f59d320a796ac5c03a1935d893d185b9"} Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.634496 4871 scope.go:117] "RemoveContainer" containerID="fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.641382 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2788cfc5-005e-4f99-83ac-9011cbe838cf" (UID: "2788cfc5-005e-4f99-83ac-9011cbe838cf"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.671791 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2788cfc5-005e-4f99-83ac-9011cbe838cf" (UID: "2788cfc5-005e-4f99-83ac-9011cbe838cf"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.672214 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.672245 4871 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.672254 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5tsq\" (UniqueName: \"kubernetes.io/projected/2788cfc5-005e-4f99-83ac-9011cbe838cf-kube-api-access-v5tsq\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.703603 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5566bf8457-7qhhj" podStartSLOduration=8.703507237 podStartE2EDuration="8.703507237s" podCreationTimestamp="2025-11-26 05:44:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:25.65735523 +0000 UTC m=+1123.840406846" watchObservedRunningTime="2025-11-26 05:44:25.703507237 +0000 UTC m=+1123.886558813" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.703837 4871 scope.go:117] "RemoveContainer" containerID="67005b550bd76231f9967f6ba0536c9d13c10df2f1ef507fd3f567579baee20e" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.708040 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-svc" 
(OuterVolumeSpecName: "dns-svc") pod "2788cfc5-005e-4f99-83ac-9011cbe838cf" (UID: "2788cfc5-005e-4f99-83ac-9011cbe838cf"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.722959 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-config" (OuterVolumeSpecName: "config") pod "2788cfc5-005e-4f99-83ac-9011cbe838cf" (UID: "2788cfc5-005e-4f99-83ac-9011cbe838cf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.735768 4871 scope.go:117] "RemoveContainer" containerID="fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec" Nov 26 05:44:25 crc kubenswrapper[4871]: E1126 05:44:25.739969 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec\": container with ID starting with fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec not found: ID does not exist" containerID="fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.740004 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec"} err="failed to get container status \"fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec\": rpc error: code = NotFound desc = could not find container \"fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec\": container with ID starting with fd3dd424b7f77f26292a1ad9b0ed4f6c67795e95421c9982a1f419c27ab2abec not found: ID does not exist" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.740029 4871 scope.go:117] "RemoveContainer" containerID="67005b550bd76231f9967f6ba0536c9d13c10df2f1ef507fd3f567579baee20e" Nov 26 05:44:25 crc kubenswrapper[4871]: E1126 05:44:25.743888 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67005b550bd76231f9967f6ba0536c9d13c10df2f1ef507fd3f567579baee20e\": container with ID starting with 67005b550bd76231f9967f6ba0536c9d13c10df2f1ef507fd3f567579baee20e not found: ID does not exist" containerID="67005b550bd76231f9967f6ba0536c9d13c10df2f1ef507fd3f567579baee20e" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.743922 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67005b550bd76231f9967f6ba0536c9d13c10df2f1ef507fd3f567579baee20e"} err="failed to get container status \"67005b550bd76231f9967f6ba0536c9d13c10df2f1ef507fd3f567579baee20e\": rpc error: code = NotFound desc = could not find container \"67005b550bd76231f9967f6ba0536c9d13c10df2f1ef507fd3f567579baee20e\": container with ID starting with 67005b550bd76231f9967f6ba0536c9d13c10df2f1ef507fd3f567579baee20e not found: ID does not exist" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.773516 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:25 crc kubenswrapper[4871]: I1126 05:44:25.773557 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/2788cfc5-005e-4f99-83ac-9011cbe838cf-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.033819 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.056260 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-578598f949-zhj9v"] Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.084604 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-578598f949-zhj9v"] Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.182860 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-combined-ca-bundle\") pod \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.182951 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-scripts\") pod \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.183014 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-fernet-keys\") pod \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.183196 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-config-data\") pod \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.183225 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgbq2\" (UniqueName: \"kubernetes.io/projected/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-kube-api-access-bgbq2\") pod \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.183310 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-credential-keys\") pod \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\" (UID: \"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e\") " Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.199517 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e" (UID: "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.200677 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e" (UID: "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.202966 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-scripts" (OuterVolumeSpecName: "scripts") pod "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e" (UID: "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.205353 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-kube-api-access-bgbq2" (OuterVolumeSpecName: "kube-api-access-bgbq2") pod "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e" (UID: "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e"). InnerVolumeSpecName "kube-api-access-bgbq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.225689 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-config-data" (OuterVolumeSpecName: "config-data") pod "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e" (UID: "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.236865 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e" (UID: "c1ff3641-2ac2-4223-b2e5-c0bd333bec1e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.285818 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.285863 4871 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.285879 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.285890 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgbq2\" (UniqueName: \"kubernetes.io/projected/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-kube-api-access-bgbq2\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.285906 4871 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.285917 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.554123 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="2788cfc5-005e-4f99-83ac-9011cbe838cf" path="/var/lib/kubelet/pods/2788cfc5-005e-4f99-83ac-9011cbe838cf/volumes" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.649627 4871 generic.go:334] "Generic (PLEG): container finished" podID="450179f7-baf0-481d-ad0e-4d3534ee28f4" containerID="7bac7bd9dc077623d52482f17b20a5571b728ffdb858f67e1e4e5e42bda7a265" exitCode=0 Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.649705 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-6z5bf" event={"ID":"450179f7-baf0-481d-ad0e-4d3534ee28f4","Type":"ContainerDied","Data":"7bac7bd9dc077623d52482f17b20a5571b728ffdb858f67e1e4e5e42bda7a265"} Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.661034 4871 generic.go:334] "Generic (PLEG): container finished" podID="e72bbf93-367f-4207-b846-b9cf819b9b4c" containerID="bb34da1a086146aa7815a37eef79a6585c3a58342bfd9113c0a6818bf15c248f" exitCode=0 Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.661098 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-lwfp5" event={"ID":"e72bbf93-367f-4207-b846-b9cf819b9b4c","Type":"ContainerDied","Data":"bb34da1a086146aa7815a37eef79a6585c3a58342bfd9113c0a6818bf15c248f"} Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.674181 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-kkjjm" event={"ID":"c1ff3641-2ac2-4223-b2e5-c0bd333bec1e","Type":"ContainerDied","Data":"3ea3ecce11cb20df2f874230ffe699b89a37309508cf5064bab7cc2d252daf16"} Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.674218 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ea3ecce11cb20df2f874230ffe699b89a37309508cf5064bab7cc2d252daf16" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.674284 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-kkjjm" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.751995 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-847fdf8fc-mswx4"] Nov 26 05:44:26 crc kubenswrapper[4871]: E1126 05:44:26.752420 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2788cfc5-005e-4f99-83ac-9011cbe838cf" containerName="init" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.752437 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2788cfc5-005e-4f99-83ac-9011cbe838cf" containerName="init" Nov 26 05:44:26 crc kubenswrapper[4871]: E1126 05:44:26.752472 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1ff3641-2ac2-4223-b2e5-c0bd333bec1e" containerName="keystone-bootstrap" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.752480 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1ff3641-2ac2-4223-b2e5-c0bd333bec1e" containerName="keystone-bootstrap" Nov 26 05:44:26 crc kubenswrapper[4871]: E1126 05:44:26.752492 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2788cfc5-005e-4f99-83ac-9011cbe838cf" containerName="dnsmasq-dns" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.752500 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2788cfc5-005e-4f99-83ac-9011cbe838cf" containerName="dnsmasq-dns" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.752817 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="2788cfc5-005e-4f99-83ac-9011cbe838cf" containerName="dnsmasq-dns" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.752853 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1ff3641-2ac2-4223-b2e5-c0bd333bec1e" containerName="keystone-bootstrap" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.753641 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.757040 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-w7bjw" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.760773 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.760920 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.761019 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.761555 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.763817 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.775189 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-847fdf8fc-mswx4"] Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.903787 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-scripts\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.903832 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-config-data\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.903862 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-internal-tls-certs\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.903903 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-combined-ca-bundle\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.903973 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-public-tls-certs\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.904007 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-credential-keys\") pod \"keystone-847fdf8fc-mswx4\" (UID: 
\"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.904031 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-fernet-keys\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.904057 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6vc4\" (UniqueName: \"kubernetes.io/projected/609a98bb-6812-4d0f-b408-023056fc5bca-kube-api-access-z6vc4\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:26 crc kubenswrapper[4871]: I1126 05:44:26.976160 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8665945b44-wbcwv" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.163:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.163:8443: connect: connection refused" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.005677 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-internal-tls-certs\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.005748 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-combined-ca-bundle\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.005822 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-public-tls-certs\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.005878 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-credential-keys\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.005902 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-fernet-keys\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.005931 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6vc4\" (UniqueName: \"kubernetes.io/projected/609a98bb-6812-4d0f-b408-023056fc5bca-kube-api-access-z6vc4\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " 
pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.005961 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-scripts\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.005982 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-config-data\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.011169 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-combined-ca-bundle\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.012790 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-scripts\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.016867 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-public-tls-certs\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.018403 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-config-data\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.021145 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-fernet-keys\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.024835 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-credential-keys\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.026778 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6vc4\" (UniqueName: \"kubernetes.io/projected/609a98bb-6812-4d0f-b408-023056fc5bca-kube-api-access-z6vc4\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.037247 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/609a98bb-6812-4d0f-b408-023056fc5bca-internal-tls-certs\") pod \"keystone-847fdf8fc-mswx4\" (UID: \"609a98bb-6812-4d0f-b408-023056fc5bca\") " pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.074788 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7cbf6bc784-rm6hn" podUID="4a2ec979-4e84-42ce-9299-8b9f5d88f001" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.164:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.164:8443: connect: connection refused" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.088496 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.693842 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-847fdf8fc-mswx4"] Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.787236 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.787443 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerName="watcher-api-log" containerID="cri-o://5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f" gracePeriod=30 Nov 26 05:44:27 crc kubenswrapper[4871]: I1126 05:44:27.787922 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerName="watcher-api" containerID="cri-o://31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee" gracePeriod=30 Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.038008 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-6z5bf" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.132124 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm6dx\" (UniqueName: \"kubernetes.io/projected/450179f7-baf0-481d-ad0e-4d3534ee28f4-kube-api-access-hm6dx\") pod \"450179f7-baf0-481d-ad0e-4d3534ee28f4\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.132178 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/450179f7-baf0-481d-ad0e-4d3534ee28f4-logs\") pod \"450179f7-baf0-481d-ad0e-4d3534ee28f4\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.132215 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-scripts\") pod \"450179f7-baf0-481d-ad0e-4d3534ee28f4\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.132236 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-config-data\") pod \"450179f7-baf0-481d-ad0e-4d3534ee28f4\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.132285 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-combined-ca-bundle\") pod \"450179f7-baf0-481d-ad0e-4d3534ee28f4\" (UID: \"450179f7-baf0-481d-ad0e-4d3534ee28f4\") " Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.133219 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/450179f7-baf0-481d-ad0e-4d3534ee28f4-logs" (OuterVolumeSpecName: "logs") pod "450179f7-baf0-481d-ad0e-4d3534ee28f4" (UID: "450179f7-baf0-481d-ad0e-4d3534ee28f4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.138802 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-scripts" (OuterVolumeSpecName: "scripts") pod "450179f7-baf0-481d-ad0e-4d3534ee28f4" (UID: "450179f7-baf0-481d-ad0e-4d3534ee28f4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.140488 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/450179f7-baf0-481d-ad0e-4d3534ee28f4-kube-api-access-hm6dx" (OuterVolumeSpecName: "kube-api-access-hm6dx") pod "450179f7-baf0-481d-ad0e-4d3534ee28f4" (UID: "450179f7-baf0-481d-ad0e-4d3534ee28f4"). InnerVolumeSpecName "kube-api-access-hm6dx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.188123 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-config-data" (OuterVolumeSpecName: "config-data") pod "450179f7-baf0-481d-ad0e-4d3534ee28f4" (UID: "450179f7-baf0-481d-ad0e-4d3534ee28f4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.196709 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "450179f7-baf0-481d-ad0e-4d3534ee28f4" (UID: "450179f7-baf0-481d-ad0e-4d3534ee28f4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.234308 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hm6dx\" (UniqueName: \"kubernetes.io/projected/450179f7-baf0-481d-ad0e-4d3534ee28f4-kube-api-access-hm6dx\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.234352 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/450179f7-baf0-481d-ad0e-4d3534ee28f4-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.234367 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.234378 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.234391 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/450179f7-baf0-481d-ad0e-4d3534ee28f4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.284881 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-lwfp5" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.437275 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztplr\" (UniqueName: \"kubernetes.io/projected/e72bbf93-367f-4207-b846-b9cf819b9b4c-kube-api-access-ztplr\") pod \"e72bbf93-367f-4207-b846-b9cf819b9b4c\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.437418 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-combined-ca-bundle\") pod \"e72bbf93-367f-4207-b846-b9cf819b9b4c\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.437537 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-db-sync-config-data\") pod \"e72bbf93-367f-4207-b846-b9cf819b9b4c\" (UID: \"e72bbf93-367f-4207-b846-b9cf819b9b4c\") " Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.445681 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e72bbf93-367f-4207-b846-b9cf819b9b4c-kube-api-access-ztplr" (OuterVolumeSpecName: "kube-api-access-ztplr") pod "e72bbf93-367f-4207-b846-b9cf819b9b4c" (UID: "e72bbf93-367f-4207-b846-b9cf819b9b4c"). InnerVolumeSpecName "kube-api-access-ztplr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.445794 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e72bbf93-367f-4207-b846-b9cf819b9b4c" (UID: "e72bbf93-367f-4207-b846-b9cf819b9b4c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.467659 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e72bbf93-367f-4207-b846-b9cf819b9b4c" (UID: "e72bbf93-367f-4207-b846-b9cf819b9b4c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.540935 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztplr\" (UniqueName: \"kubernetes.io/projected/e72bbf93-367f-4207-b846-b9cf819b9b4c-kube-api-access-ztplr\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.541215 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.541225 4871 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e72bbf93-367f-4207-b846-b9cf819b9b4c-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.737644 4871 generic.go:334] "Generic (PLEG): container finished" podID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerID="5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f" exitCode=143 Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.737719 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"711348b2-05b6-4d20-8eea-c2e19c4dc949","Type":"ContainerDied","Data":"5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f"} Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.754224 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-847fdf8fc-mswx4" event={"ID":"609a98bb-6812-4d0f-b408-023056fc5bca","Type":"ContainerStarted","Data":"9bfdc9181b736e36283a9b4411aacf2862d6a177fb93e87297754c33431db90f"} Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.754304 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-847fdf8fc-mswx4" event={"ID":"609a98bb-6812-4d0f-b408-023056fc5bca","Type":"ContainerStarted","Data":"0c3deb5dbd8777a3cbb812b8c3829e9d7d154d13400a42799f00d3b22e053008"} Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.755283 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.763430 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-6z5bf" event={"ID":"450179f7-baf0-481d-ad0e-4d3534ee28f4","Type":"ContainerDied","Data":"25421678217b9c7800f001ab6e440214418c18f7f034ccdd1269ba19c8c287e0"} Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.763455 4871 pod_container_deletor.go:80] "Container not 
found in pod's containers" containerID="25421678217b9c7800f001ab6e440214418c18f7f034ccdd1269ba19c8c287e0" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.763507 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-6z5bf" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.791996 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-lwfp5" event={"ID":"e72bbf93-367f-4207-b846-b9cf819b9b4c","Type":"ContainerDied","Data":"9e9cb5ad259ed221b84957b20ad52a59114f07f826ec1ceaacc4c74790489e87"} Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.792055 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e9cb5ad259ed221b84957b20ad52a59114f07f826ec1ceaacc4c74790489e87" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.792127 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-lwfp5" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.793007 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-847fdf8fc-mswx4" podStartSLOduration=2.792984949 podStartE2EDuration="2.792984949s" podCreationTimestamp="2025-11-26 05:44:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:28.772950922 +0000 UTC m=+1126.956002508" watchObservedRunningTime="2025-11-26 05:44:28.792984949 +0000 UTC m=+1126.976036555" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.938837 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-665fcf99fb-m82r7"] Nov 26 05:44:28 crc kubenswrapper[4871]: E1126 05:44:28.940134 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="450179f7-baf0-481d-ad0e-4d3534ee28f4" containerName="placement-db-sync" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.940158 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="450179f7-baf0-481d-ad0e-4d3534ee28f4" containerName="placement-db-sync" Nov 26 05:44:28 crc kubenswrapper[4871]: E1126 05:44:28.940182 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e72bbf93-367f-4207-b846-b9cf819b9b4c" containerName="barbican-db-sync" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.940189 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="e72bbf93-367f-4207-b846-b9cf819b9b4c" containerName="barbican-db-sync" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.940383 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="e72bbf93-367f-4207-b846-b9cf819b9b4c" containerName="barbican-db-sync" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.940397 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="450179f7-baf0-481d-ad0e-4d3534ee28f4" containerName="placement-db-sync" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.942734 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.945486 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.946120 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.946247 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-j5phm" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.946307 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.946375 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 05:44:28 crc kubenswrapper[4871]: I1126 05:44:28.952372 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-665fcf99fb-m82r7"] Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.054046 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-scripts\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.054318 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7qrc\" (UniqueName: \"kubernetes.io/projected/ae63dcae-cddc-4f63-acc0-4ec3254a6116-kube-api-access-k7qrc\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.054497 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-combined-ca-bundle\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.054633 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae63dcae-cddc-4f63-acc0-4ec3254a6116-logs\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.054781 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-config-data\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.054947 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-public-tls-certs\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.055074 4871 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-internal-tls-certs\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.066581 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-ff77984c8-tthxz"] Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.068207 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.071915 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-tgrk2" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.075698 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-574cf75679-xcbqs"] Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.077192 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.081543 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.082039 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.082064 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.113600 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-ff77984c8-tthxz"] Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.129315 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-574cf75679-xcbqs"] Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.160633 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19019851-fc4d-41ff-ba88-f347dc3305a2-logs\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.160681 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19019851-fc4d-41ff-ba88-f347dc3305a2-combined-ca-bundle\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.160714 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-config-data\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.160762 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-scripts\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.160782 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-logs\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.161658 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7qrc\" (UniqueName: \"kubernetes.io/projected/ae63dcae-cddc-4f63-acc0-4ec3254a6116-kube-api-access-k7qrc\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.161699 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19019851-fc4d-41ff-ba88-f347dc3305a2-config-data-custom\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.161722 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mckd\" (UniqueName: \"kubernetes.io/projected/19019851-fc4d-41ff-ba88-f347dc3305a2-kube-api-access-9mckd\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.161750 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19019851-fc4d-41ff-ba88-f347dc3305a2-config-data\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.161772 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-combined-ca-bundle\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.161793 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrvjh\" (UniqueName: \"kubernetes.io/projected/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-kube-api-access-rrvjh\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.161816 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae63dcae-cddc-4f63-acc0-4ec3254a6116-logs\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.161837 4871 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-combined-ca-bundle\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.161859 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-config-data\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.161947 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-config-data-custom\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.161969 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-public-tls-certs\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.162004 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-internal-tls-certs\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.166915 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-scripts\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.168619 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-internal-tls-certs\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.172832 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ae63dcae-cddc-4f63-acc0-4ec3254a6116-logs\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.176991 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-public-tls-certs\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.184345 4871 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-combined-ca-bundle\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.190615 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ae63dcae-cddc-4f63-acc0-4ec3254a6116-config-data\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.201130 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7qrc\" (UniqueName: \"kubernetes.io/projected/ae63dcae-cddc-4f63-acc0-4ec3254a6116-kube-api-access-k7qrc\") pod \"placement-665fcf99fb-m82r7\" (UID: \"ae63dcae-cddc-4f63-acc0-4ec3254a6116\") " pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.204032 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-764bcc8bff-4h264"] Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.207694 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.226988 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764bcc8bff-4h264"] Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.264317 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-logs\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.264406 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19019851-fc4d-41ff-ba88-f347dc3305a2-config-data-custom\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.264442 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mckd\" (UniqueName: \"kubernetes.io/projected/19019851-fc4d-41ff-ba88-f347dc3305a2-kube-api-access-9mckd\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.264482 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19019851-fc4d-41ff-ba88-f347dc3305a2-config-data\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.264514 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrvjh\" (UniqueName: \"kubernetes.io/projected/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-kube-api-access-rrvjh\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " 
pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.264572 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-combined-ca-bundle\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.264660 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-config-data-custom\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.264729 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19019851-fc4d-41ff-ba88-f347dc3305a2-logs\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.264757 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19019851-fc4d-41ff-ba88-f347dc3305a2-combined-ca-bundle\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.264778 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-config-data\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.266122 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-logs\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.267979 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/19019851-fc4d-41ff-ba88-f347dc3305a2-logs\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.272646 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-config-data\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.272916 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/19019851-fc4d-41ff-ba88-f347dc3305a2-config-data-custom\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: 
\"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.274133 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-combined-ca-bundle\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.274144 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19019851-fc4d-41ff-ba88-f347dc3305a2-config-data\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.278285 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19019851-fc4d-41ff-ba88-f347dc3305a2-combined-ca-bundle\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.278799 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.279804 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-config-data-custom\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.284847 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.299770 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5995b75f66-tdlhq"] Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.301633 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.303544 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mckd\" (UniqueName: \"kubernetes.io/projected/19019851-fc4d-41ff-ba88-f347dc3305a2-kube-api-access-9mckd\") pod \"barbican-worker-574cf75679-xcbqs\" (UID: \"19019851-fc4d-41ff-ba88-f347dc3305a2\") " pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.308815 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.317224 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5995b75f66-tdlhq"] Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.318109 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrvjh\" (UniqueName: \"kubernetes.io/projected/2d0d2e04-05e3-4ace-8b11-0d6317e7ed80-kube-api-access-rrvjh\") pod \"barbican-keystone-listener-ff77984c8-tthxz\" (UID: \"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80\") " pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.367609 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-nb\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.367670 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-sb\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.367730 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-svc\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.367751 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cs5p\" (UniqueName: \"kubernetes.io/projected/cd5769df-df2d-4456-971b-3050ac8cc37c-kube-api-access-7cs5p\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.367793 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-swift-storage-0\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.367811 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-config\") pod 
\"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.409969 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.447916 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-574cf75679-xcbqs" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.472515 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-swift-storage-0\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.472586 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-config\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.472629 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-combined-ca-bundle\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.472651 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2b24x\" (UniqueName: \"kubernetes.io/projected/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-kube-api-access-2b24x\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.472668 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data-custom\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.472703 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-logs\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.472748 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-nb\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.472818 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-sb\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.472854 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.472888 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-svc\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.472911 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cs5p\" (UniqueName: \"kubernetes.io/projected/cd5769df-df2d-4456-971b-3050ac8cc37c-kube-api-access-7cs5p\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.474199 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-swift-storage-0\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.475199 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-config\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.477403 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-nb\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.479405 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-svc\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.483964 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-sb\") pod \"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.499111 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cs5p\" (UniqueName: \"kubernetes.io/projected/cd5769df-df2d-4456-971b-3050ac8cc37c-kube-api-access-7cs5p\") pod 
\"dnsmasq-dns-764bcc8bff-4h264\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.574405 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.574496 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-combined-ca-bundle\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.574538 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2b24x\" (UniqueName: \"kubernetes.io/projected/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-kube-api-access-2b24x\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.574557 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data-custom\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.574590 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-logs\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.575097 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-logs\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.585023 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-combined-ca-bundle\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.589139 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data-custom\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.589724 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " 
pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.602119 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2b24x\" (UniqueName: \"kubernetes.io/projected/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-kube-api-access-2b24x\") pod \"barbican-api-5995b75f66-tdlhq\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.690508 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.705686 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": read tcp 10.217.0.2:33280->10.217.0.162:9322: read: connection reset by peer" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.709674 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": read tcp 10.217.0.2:33270->10.217.0.162:9322: read: connection reset by peer" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.717176 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.865919 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-7pzbd" event={"ID":"c8bdb9c7-91c3-40dc-920e-6e333b18f331","Type":"ContainerStarted","Data":"c935206db6eafa84766722bfdd4c923595613409c3a25fe5b2ea4a43d6e58967"} Nov 26 05:44:29 crc kubenswrapper[4871]: I1126 05:44:29.994240 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-7pzbd" podStartSLOduration=4.767211325 podStartE2EDuration="52.994223766s" podCreationTimestamp="2025-11-26 05:43:37 +0000 UTC" firstStartedPulling="2025-11-26 05:43:39.514682698 +0000 UTC m=+1077.697734284" lastFinishedPulling="2025-11-26 05:44:27.741695139 +0000 UTC m=+1125.924746725" observedRunningTime="2025-11-26 05:44:29.894228192 +0000 UTC m=+1128.077279778" watchObservedRunningTime="2025-11-26 05:44:29.994223766 +0000 UTC m=+1128.177275352" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.013408 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-665fcf99fb-m82r7"] Nov 26 05:44:30 crc kubenswrapper[4871]: W1126 05:44:30.396110 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d0d2e04_05e3_4ace_8b11_0d6317e7ed80.slice/crio-1b0fcb265c97a90c7c9982582d2ae323418faa02580a95b55df172d12e8b6923 WatchSource:0}: Error finding container 1b0fcb265c97a90c7c9982582d2ae323418faa02580a95b55df172d12e8b6923: Status 404 returned error can't find the container with id 1b0fcb265c97a90c7c9982582d2ae323418faa02580a95b55df172d12e8b6923 Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.412583 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-ff77984c8-tthxz"] Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.436881 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-574cf75679-xcbqs"] Nov 26 05:44:30 crc 
kubenswrapper[4871]: I1126 05:44:30.462701 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 26 05:44:30 crc kubenswrapper[4871]: W1126 05:44:30.614826 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39bbf335_d8f3_41f0_84e8_b9589ff1b60c.slice/crio-b37f3809ef3b5cc09bb4e4f5083a0655282079ae210f31289a2d0e69e7a5f0cf WatchSource:0}: Error finding container b37f3809ef3b5cc09bb4e4f5083a0655282079ae210f31289a2d0e69e7a5f0cf: Status 404 returned error can't find the container with id b37f3809ef3b5cc09bb4e4f5083a0655282079ae210f31289a2d0e69e7a5f0cf Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.616369 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764bcc8bff-4h264"] Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.625383 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/711348b2-05b6-4d20-8eea-c2e19c4dc949-logs\") pod \"711348b2-05b6-4d20-8eea-c2e19c4dc949\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.625435 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-custom-prometheus-ca\") pod \"711348b2-05b6-4d20-8eea-c2e19c4dc949\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.625476 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-config-data\") pod \"711348b2-05b6-4d20-8eea-c2e19c4dc949\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.625592 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4g765\" (UniqueName: \"kubernetes.io/projected/711348b2-05b6-4d20-8eea-c2e19c4dc949-kube-api-access-4g765\") pod \"711348b2-05b6-4d20-8eea-c2e19c4dc949\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.625819 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-combined-ca-bundle\") pod \"711348b2-05b6-4d20-8eea-c2e19c4dc949\" (UID: \"711348b2-05b6-4d20-8eea-c2e19c4dc949\") " Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.628119 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/711348b2-05b6-4d20-8eea-c2e19c4dc949-kube-api-access-4g765" (OuterVolumeSpecName: "kube-api-access-4g765") pod "711348b2-05b6-4d20-8eea-c2e19c4dc949" (UID: "711348b2-05b6-4d20-8eea-c2e19c4dc949"). InnerVolumeSpecName "kube-api-access-4g765". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.628463 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/711348b2-05b6-4d20-8eea-c2e19c4dc949-logs" (OuterVolumeSpecName: "logs") pod "711348b2-05b6-4d20-8eea-c2e19c4dc949" (UID: "711348b2-05b6-4d20-8eea-c2e19c4dc949"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.637112 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/711348b2-05b6-4d20-8eea-c2e19c4dc949-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.637142 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4g765\" (UniqueName: \"kubernetes.io/projected/711348b2-05b6-4d20-8eea-c2e19c4dc949-kube-api-access-4g765\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.638230 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5995b75f66-tdlhq"] Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.738693 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "711348b2-05b6-4d20-8eea-c2e19c4dc949" (UID: "711348b2-05b6-4d20-8eea-c2e19c4dc949"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.749615 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "711348b2-05b6-4d20-8eea-c2e19c4dc949" (UID: "711348b2-05b6-4d20-8eea-c2e19c4dc949"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.795706 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-config-data" (OuterVolumeSpecName: "config-data") pod "711348b2-05b6-4d20-8eea-c2e19c4dc949" (UID: "711348b2-05b6-4d20-8eea-c2e19c4dc949"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.842154 4871 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.842197 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.842210 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/711348b2-05b6-4d20-8eea-c2e19c4dc949-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.887670 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" event={"ID":"cd5769df-df2d-4456-971b-3050ac8cc37c","Type":"ContainerStarted","Data":"27d59b016415f935a4c28911fa649b6475aff933357093d19ee9aa2326ffb652"} Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.890802 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-574cf75679-xcbqs" event={"ID":"19019851-fc4d-41ff-ba88-f347dc3305a2","Type":"ContainerStarted","Data":"f93c250a231e68be5c3ebd6efb166aa0d49f5eee43f99b1a2d46ea4790b0a677"} Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.918203 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-665fcf99fb-m82r7" event={"ID":"ae63dcae-cddc-4f63-acc0-4ec3254a6116","Type":"ContainerStarted","Data":"86355c257b2181855351d80e969f6cfdd916035a823e85f98a20bc1686c84fbb"} Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.918280 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-665fcf99fb-m82r7" event={"ID":"ae63dcae-cddc-4f63-acc0-4ec3254a6116","Type":"ContainerStarted","Data":"dfe2a0ee255d98203e7708aebae97a3f3c2270c12c8e412b19e06eb8c2df7da0"} Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.929404 4871 generic.go:334] "Generic (PLEG): container finished" podID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerID="31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee" exitCode=0 Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.929485 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"711348b2-05b6-4d20-8eea-c2e19c4dc949","Type":"ContainerDied","Data":"31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee"} Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.929518 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"711348b2-05b6-4d20-8eea-c2e19c4dc949","Type":"ContainerDied","Data":"96eece22e26aec4e3813ce1d6a3b2eccd59f1f708317e747c6c63dca29c50b88"} Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.929606 4871 scope.go:117] "RemoveContainer" containerID="31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.929822 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.939639 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5995b75f66-tdlhq" event={"ID":"39bbf335-d8f3-41f0-84e8-b9589ff1b60c","Type":"ContainerStarted","Data":"b37f3809ef3b5cc09bb4e4f5083a0655282079ae210f31289a2d0e69e7a5f0cf"} Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.948199 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" event={"ID":"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80","Type":"ContainerStarted","Data":"1b0fcb265c97a90c7c9982582d2ae323418faa02580a95b55df172d12e8b6923"} Nov 26 05:44:30 crc kubenswrapper[4871]: I1126 05:44:30.966126 4871 scope.go:117] "RemoveContainer" containerID="5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.041556 4871 scope.go:117] "RemoveContainer" containerID="31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee" Nov 26 05:44:31 crc kubenswrapper[4871]: E1126 05:44:31.042292 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee\": container with ID starting with 31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee not found: ID does not exist" containerID="31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.042323 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee"} err="failed to get container status \"31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee\": rpc error: code = NotFound desc = could not find container \"31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee\": container with ID starting with 31a0c5422d918660b09f105e0ea690cf93ca74ecb902b0280e6fc0393e4671ee not found: ID does not exist" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.042342 4871 scope.go:117] "RemoveContainer" containerID="5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f" Nov 26 05:44:31 crc kubenswrapper[4871]: E1126 05:44:31.042735 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f\": container with ID starting with 5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f not found: ID does not exist" containerID="5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.042754 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f"} err="failed to get container status \"5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f\": rpc error: code = NotFound desc = could not find container \"5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f\": container with ID starting with 5a34ebe42c2186a08d73e4fceb07a09f26bb1c87ad34e154116e0f781969d17f not found: ID does not exist" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.062511 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 
05:44:31.071850 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.088046 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Nov 26 05:44:31 crc kubenswrapper[4871]: E1126 05:44:31.094877 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerName="watcher-api" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.095005 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerName="watcher-api" Nov 26 05:44:31 crc kubenswrapper[4871]: E1126 05:44:31.095144 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerName="watcher-api-log" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.095264 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerName="watcher-api-log" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.095564 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerName="watcher-api-log" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.095644 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" containerName="watcher-api" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.096742 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.098960 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.099222 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.099222 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.101124 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.255819 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.255865 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.255900 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/154bc562-d8d8-4608-8973-66b427a4f98f-logs\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.255917 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-public-tls-certs\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.255952 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfllp\" (UniqueName: \"kubernetes.io/projected/154bc562-d8d8-4608-8973-66b427a4f98f-kube-api-access-pfllp\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.255978 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-config-data\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.256010 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.357613 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.357648 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.357699 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/154bc562-d8d8-4608-8973-66b427a4f98f-logs\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.357725 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-public-tls-certs\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.357768 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfllp\" (UniqueName: \"kubernetes.io/projected/154bc562-d8d8-4608-8973-66b427a4f98f-kube-api-access-pfllp\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.357798 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-config-data\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: 
I1126 05:44:31.357826 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.358928 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/154bc562-d8d8-4608-8973-66b427a4f98f-logs\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.364953 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-config-data\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.366494 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.367991 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.369113 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.373176 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/154bc562-d8d8-4608-8973-66b427a4f98f-public-tls-certs\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.394096 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfllp\" (UniqueName: \"kubernetes.io/projected/154bc562-d8d8-4608-8973-66b427a4f98f-kube-api-access-pfllp\") pod \"watcher-api-0\" (UID: \"154bc562-d8d8-4608-8973-66b427a4f98f\") " pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.438135 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.974714 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5995b75f66-tdlhq" event={"ID":"39bbf335-d8f3-41f0-84e8-b9589ff1b60c","Type":"ContainerStarted","Data":"7d77946b042e750ca6623b9dcba07756afad16d8a765a2519256ae2c396e6fa6"} Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.982391 4871 generic.go:334] "Generic (PLEG): container finished" podID="cd5769df-df2d-4456-971b-3050ac8cc37c" containerID="55695ba9f59bebb90443923c59e3d2a17e8ecc12ddd55e2996ff78165ae6e7e2" exitCode=0 Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.982512 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" event={"ID":"cd5769df-df2d-4456-971b-3050ac8cc37c","Type":"ContainerDied","Data":"55695ba9f59bebb90443923c59e3d2a17e8ecc12ddd55e2996ff78165ae6e7e2"} Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.991956 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-665fcf99fb-m82r7" event={"ID":"ae63dcae-cddc-4f63-acc0-4ec3254a6116","Type":"ContainerStarted","Data":"8983bf0ff151e476fdf5d249d4e2ec0138c53a56af0ecb575f27d4626f0b107d"} Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.992657 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.992743 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.996102 4871 generic.go:334] "Generic (PLEG): container finished" podID="906807e1-f724-4ab4-9ccc-95656188890e" containerID="e76e1839b021122d94130fc2fe31234ee90db31804ad29bff7d87b4569898f52" exitCode=1 Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.996211 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"906807e1-f724-4ab4-9ccc-95656188890e","Type":"ContainerDied","Data":"e76e1839b021122d94130fc2fe31234ee90db31804ad29bff7d87b4569898f52"} Nov 26 05:44:31 crc kubenswrapper[4871]: I1126 05:44:31.996581 4871 scope.go:117] "RemoveContainer" containerID="e76e1839b021122d94130fc2fe31234ee90db31804ad29bff7d87b4569898f52" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.051077 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-665fcf99fb-m82r7" podStartSLOduration=4.051059441 podStartE2EDuration="4.051059441s" podCreationTimestamp="2025-11-26 05:44:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:32.040734054 +0000 UTC m=+1130.223785640" watchObservedRunningTime="2025-11-26 05:44:32.051059441 +0000 UTC m=+1130.234111017" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.433329 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6b4f49568b-znxq7"] Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.465767 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.471666 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6b4f49568b-znxq7"] Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.475438 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.475627 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.524206 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="711348b2-05b6-4d20-8eea-c2e19c4dc949" path="/var/lib/kubelet/pods/711348b2-05b6-4d20-8eea-c2e19c4dc949/volumes" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.591181 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-public-tls-certs\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.591242 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/421fd2e9-5378-4cd9-89c0-523f89b8fea6-logs\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.591471 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-config-data\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.591594 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d55ss\" (UniqueName: \"kubernetes.io/projected/421fd2e9-5378-4cd9-89c0-523f89b8fea6-kube-api-access-d55ss\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.591657 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-combined-ca-bundle\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.591733 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-config-data-custom\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.591875 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-internal-tls-certs\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.694208 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/421fd2e9-5378-4cd9-89c0-523f89b8fea6-logs\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.694315 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-config-data\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.694390 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d55ss\" (UniqueName: \"kubernetes.io/projected/421fd2e9-5378-4cd9-89c0-523f89b8fea6-kube-api-access-d55ss\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.694463 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-combined-ca-bundle\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.694556 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-config-data-custom\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.694659 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-internal-tls-certs\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.694808 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-public-tls-certs\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.705038 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/421fd2e9-5378-4cd9-89c0-523f89b8fea6-logs\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.714582 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d55ss\" (UniqueName: 
\"kubernetes.io/projected/421fd2e9-5378-4cd9-89c0-523f89b8fea6-kube-api-access-d55ss\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.715649 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-config-data-custom\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.716188 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-combined-ca-bundle\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.716536 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-config-data\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.718868 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-public-tls-certs\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.719016 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/421fd2e9-5378-4cd9-89c0-523f89b8fea6-internal-tls-certs\") pod \"barbican-api-6b4f49568b-znxq7\" (UID: \"421fd2e9-5378-4cd9-89c0-523f89b8fea6\") " pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:32 crc kubenswrapper[4871]: I1126 05:44:32.793591 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:34 crc kubenswrapper[4871]: I1126 05:44:34.152573 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 26 05:44:34 crc kubenswrapper[4871]: I1126 05:44:34.153191 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 26 05:44:34 crc kubenswrapper[4871]: I1126 05:44:34.284568 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Nov 26 05:44:34 crc kubenswrapper[4871]: I1126 05:44:34.320192 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Nov 26 05:44:35 crc kubenswrapper[4871]: I1126 05:44:35.055349 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Nov 26 05:44:36 crc kubenswrapper[4871]: I1126 05:44:36.047013 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5995b75f66-tdlhq" event={"ID":"39bbf335-d8f3-41f0-84e8-b9589ff1b60c","Type":"ContainerStarted","Data":"95a1257b7b87c0c6f5e3a60407c7428ca2153e4f024c94894b20f9aead01443b"} Nov 26 05:44:36 crc kubenswrapper[4871]: I1126 05:44:36.048170 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:36 crc kubenswrapper[4871]: I1126 05:44:36.048211 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:36 crc kubenswrapper[4871]: I1126 05:44:36.050910 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5995b75f66-tdlhq" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.174:9311/healthcheck\": dial tcp 10.217.0.174:9311: connect: connection refused" Nov 26 05:44:36 crc kubenswrapper[4871]: I1126 05:44:36.052144 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" event={"ID":"cd5769df-df2d-4456-971b-3050ac8cc37c","Type":"ContainerStarted","Data":"05479995604ea1c6b9fc3d4aecdaf6fdb8d17fe75cc495fd22daaaac79cdc6c6"} Nov 26 05:44:36 crc kubenswrapper[4871]: I1126 05:44:36.052376 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:36 crc kubenswrapper[4871]: I1126 05:44:36.057936 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"906807e1-f724-4ab4-9ccc-95656188890e","Type":"ContainerStarted","Data":"9e2d82019cfae3a801741429cd76d41e71f620675230c40d3af434b0678c5b24"} Nov 26 05:44:36 crc kubenswrapper[4871]: I1126 05:44:36.105012 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5995b75f66-tdlhq" podStartSLOduration=7.104993547 podStartE2EDuration="7.104993547s" podCreationTimestamp="2025-11-26 05:44:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:36.067549547 +0000 UTC m=+1134.250601133" watchObservedRunningTime="2025-11-26 05:44:36.104993547 +0000 UTC m=+1134.288045143" Nov 26 05:44:36 crc kubenswrapper[4871]: I1126 05:44:36.141057 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" 
podStartSLOduration=7.141038343 podStartE2EDuration="7.141038343s" podCreationTimestamp="2025-11-26 05:44:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:36.129023935 +0000 UTC m=+1134.312075521" watchObservedRunningTime="2025-11-26 05:44:36.141038343 +0000 UTC m=+1134.324089929" Nov 26 05:44:36 crc kubenswrapper[4871]: I1126 05:44:36.156071 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6b4f49568b-znxq7"] Nov 26 05:44:36 crc kubenswrapper[4871]: I1126 05:44:36.280311 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 26 05:44:36 crc kubenswrapper[4871]: W1126 05:44:36.301832 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod154bc562_d8d8_4608_8973_66b427a4f98f.slice/crio-1e9f382a54f3ecfd7c376ff5392d897b495fe0c3b79b02eef16ae5a7dc667e7d WatchSource:0}: Error finding container 1e9f382a54f3ecfd7c376ff5392d897b495fe0c3b79b02eef16ae5a7dc667e7d: Status 404 returned error can't find the container with id 1e9f382a54f3ecfd7c376ff5392d897b495fe0c3b79b02eef16ae5a7dc667e7d Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.075451 4871 generic.go:334] "Generic (PLEG): container finished" podID="c8bdb9c7-91c3-40dc-920e-6e333b18f331" containerID="c935206db6eafa84766722bfdd4c923595613409c3a25fe5b2ea4a43d6e58967" exitCode=0 Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.075830 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-7pzbd" event={"ID":"c8bdb9c7-91c3-40dc-920e-6e333b18f331","Type":"ContainerDied","Data":"c935206db6eafa84766722bfdd4c923595613409c3a25fe5b2ea4a43d6e58967"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.085922 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6b4f49568b-znxq7" event={"ID":"421fd2e9-5378-4cd9-89c0-523f89b8fea6","Type":"ContainerStarted","Data":"ffdc893328ef1dc2d50d75a274ba667b25aebe3e7eacef136e7a6b69ef382068"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.085981 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6b4f49568b-znxq7" event={"ID":"421fd2e9-5378-4cd9-89c0-523f89b8fea6","Type":"ContainerStarted","Data":"16f4245d6d813684ca6597bc1f59d16c80c8db1aeb0a587c17525d8cc4759a38"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.085999 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6b4f49568b-znxq7" event={"ID":"421fd2e9-5378-4cd9-89c0-523f89b8fea6","Type":"ContainerStarted","Data":"717b9d3d788db37497c4766444fd39fdf22a2a1b71f085e2be419895491ff084"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.086390 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.086438 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.101896 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" event={"ID":"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80","Type":"ContainerStarted","Data":"966a23288ad4ec183b61ea7004b3882cfb511ddf80453ffa2b020bc69731cc8c"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.101963 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" event={"ID":"2d0d2e04-05e3-4ace-8b11-0d6317e7ed80","Type":"ContainerStarted","Data":"0696899bc5dee1258603ce74782ce71a79903b648eb1f844bddb7a940731d640"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.112605 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b724414-8682-4e73-8b2d-305fce381613","Type":"ContainerStarted","Data":"cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.117357 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"154bc562-d8d8-4608-8973-66b427a4f98f","Type":"ContainerStarted","Data":"ba5c318438b0bfc90b277ce25720950674a1d6a5cdcb36aed9e431cb7ed0cb48"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.117403 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"154bc562-d8d8-4608-8973-66b427a4f98f","Type":"ContainerStarted","Data":"a2ca540a8f431aa6abcc347d53f9a42d7a7988f45205c81d66e55bec0af7a9db"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.117443 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"154bc562-d8d8-4608-8973-66b427a4f98f","Type":"ContainerStarted","Data":"1e9f382a54f3ecfd7c376ff5392d897b495fe0c3b79b02eef16ae5a7dc667e7d"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.118214 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.121773 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="154bc562-d8d8-4608-8973-66b427a4f98f" containerName="watcher-api" probeResult="failure" output="Get \"https://10.217.0.175:9322/\": dial tcp 10.217.0.175:9322: connect: connection refused" Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.133596 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-574cf75679-xcbqs" event={"ID":"19019851-fc4d-41ff-ba88-f347dc3305a2","Type":"ContainerStarted","Data":"710a8bc56f8ff01e70c4550e4598fea44e8a6720e7525fd05c06e81f624dd6ad"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.133653 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-574cf75679-xcbqs" event={"ID":"19019851-fc4d-41ff-ba88-f347dc3305a2","Type":"ContainerStarted","Data":"9233648a31be45dae9aa366c853f10243000c85391d1038cc3ce00733835cc0d"} Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.139091 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6b4f49568b-znxq7" podStartSLOduration=5.139065121 podStartE2EDuration="5.139065121s" podCreationTimestamp="2025-11-26 05:44:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:37.125234147 +0000 UTC m=+1135.308285743" watchObservedRunningTime="2025-11-26 05:44:37.139065121 +0000 UTC m=+1135.322116707" Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.146775 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-ff77984c8-tthxz" podStartSLOduration=2.816596416 podStartE2EDuration="8.146757822s" podCreationTimestamp="2025-11-26 05:44:29 +0000 UTC" firstStartedPulling="2025-11-26 05:44:30.445625442 +0000 UTC m=+1128.628677028" lastFinishedPulling="2025-11-26 
05:44:35.775786848 +0000 UTC m=+1133.958838434" observedRunningTime="2025-11-26 05:44:37.144219309 +0000 UTC m=+1135.327270905" watchObservedRunningTime="2025-11-26 05:44:37.146757822 +0000 UTC m=+1135.329809408" Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.166782 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-574cf75679-xcbqs" podStartSLOduration=2.904400328 podStartE2EDuration="8.166762029s" podCreationTimestamp="2025-11-26 05:44:29 +0000 UTC" firstStartedPulling="2025-11-26 05:44:30.457821375 +0000 UTC m=+1128.640872961" lastFinishedPulling="2025-11-26 05:44:35.720183076 +0000 UTC m=+1133.903234662" observedRunningTime="2025-11-26 05:44:37.165752474 +0000 UTC m=+1135.348804070" watchObservedRunningTime="2025-11-26 05:44:37.166762029 +0000 UTC m=+1135.349813615" Nov 26 05:44:37 crc kubenswrapper[4871]: I1126 05:44:37.192493 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=6.192471158 podStartE2EDuration="6.192471158s" podCreationTimestamp="2025-11-26 05:44:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:37.182285685 +0000 UTC m=+1135.365337271" watchObservedRunningTime="2025-11-26 05:44:37.192471158 +0000 UTC m=+1135.375522764" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.149212 4871 generic.go:334] "Generic (PLEG): container finished" podID="b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6" containerID="5d01001680921a32f895fff185bc882d16b7258d62ddfedd545f48e12e45a0fc" exitCode=0 Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.149662 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9kf7b" event={"ID":"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6","Type":"ContainerDied","Data":"5d01001680921a32f895fff185bc882d16b7258d62ddfedd545f48e12e45a0fc"} Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.546815 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-7pzbd" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.644398 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-config-data\") pod \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.644467 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-scripts\") pod \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.644586 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-combined-ca-bundle\") pod \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.644692 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-db-sync-config-data\") pod \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.644769 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c8bdb9c7-91c3-40dc-920e-6e333b18f331-etc-machine-id\") pod \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.644830 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9dnr\" (UniqueName: \"kubernetes.io/projected/c8bdb9c7-91c3-40dc-920e-6e333b18f331-kube-api-access-w9dnr\") pod \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\" (UID: \"c8bdb9c7-91c3-40dc-920e-6e333b18f331\") " Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.645153 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c8bdb9c7-91c3-40dc-920e-6e333b18f331-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c8bdb9c7-91c3-40dc-920e-6e333b18f331" (UID: "c8bdb9c7-91c3-40dc-920e-6e333b18f331"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.645385 4871 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c8bdb9c7-91c3-40dc-920e-6e333b18f331-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.677209 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8bdb9c7-91c3-40dc-920e-6e333b18f331-kube-api-access-w9dnr" (OuterVolumeSpecName: "kube-api-access-w9dnr") pod "c8bdb9c7-91c3-40dc-920e-6e333b18f331" (UID: "c8bdb9c7-91c3-40dc-920e-6e333b18f331"). InnerVolumeSpecName "kube-api-access-w9dnr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.679072 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-scripts" (OuterVolumeSpecName: "scripts") pod "c8bdb9c7-91c3-40dc-920e-6e333b18f331" (UID: "c8bdb9c7-91c3-40dc-920e-6e333b18f331"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.685982 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c8bdb9c7-91c3-40dc-920e-6e333b18f331" (UID: "c8bdb9c7-91c3-40dc-920e-6e333b18f331"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.697390 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c8bdb9c7-91c3-40dc-920e-6e333b18f331" (UID: "c8bdb9c7-91c3-40dc-920e-6e333b18f331"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.716783 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-config-data" (OuterVolumeSpecName: "config-data") pod "c8bdb9c7-91c3-40dc-920e-6e333b18f331" (UID: "c8bdb9c7-91c3-40dc-920e-6e333b18f331"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.747596 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.747632 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.747645 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.747655 4871 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c8bdb9c7-91c3-40dc-920e-6e333b18f331-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:38 crc kubenswrapper[4871]: I1126 05:44:38.747664 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9dnr\" (UniqueName: \"kubernetes.io/projected/c8bdb9c7-91c3-40dc-920e-6e333b18f331-kube-api-access-w9dnr\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.184952 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-7pzbd" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.184957 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-7pzbd" event={"ID":"c8bdb9c7-91c3-40dc-920e-6e333b18f331","Type":"ContainerDied","Data":"ca3addd3f65ad75944482261e2ad3a0edc47c590bbea5f15af8074c94166ef2d"} Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.185020 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca3addd3f65ad75944482261e2ad3a0edc47c590bbea5f15af8074c94166ef2d" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.426171 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 05:44:39 crc kubenswrapper[4871]: E1126 05:44:39.426545 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8bdb9c7-91c3-40dc-920e-6e333b18f331" containerName="cinder-db-sync" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.426560 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8bdb9c7-91c3-40dc-920e-6e333b18f331" containerName="cinder-db-sync" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.434030 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8bdb9c7-91c3-40dc-920e-6e333b18f331" containerName="cinder-db-sync" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.435124 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.453736 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.457550 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.457627 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.457567 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.457858 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8qdzr" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.566334 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764bcc8bff-4h264"] Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.566843 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" podUID="cd5769df-df2d-4456-971b-3050ac8cc37c" containerName="dnsmasq-dns" containerID="cri-o://05479995604ea1c6b9fc3d4aecdaf6fdb8d17fe75cc495fd22daaaac79cdc6c6" gracePeriod=10 Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.569060 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-scripts\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.569224 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqfcr\" (UniqueName: \"kubernetes.io/projected/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-kube-api-access-gqfcr\") pod \"cinder-scheduler-0\" (UID: 
\"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.569361 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.569452 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.569606 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.569712 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.600900 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b8db6db6f-5dhp2"] Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.602474 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.624507 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b8db6db6f-5dhp2"] Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671093 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz9z6\" (UniqueName: \"kubernetes.io/projected/bc80c43e-82e4-44ae-a948-eb29150af2ea-kube-api-access-qz9z6\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671139 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-svc\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671178 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671198 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671213 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671326 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671357 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671430 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-swift-storage-0\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671446 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671465 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-scripts\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671579 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqfcr\" (UniqueName: \"kubernetes.io/projected/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-kube-api-access-gqfcr\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.671598 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-config\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.674279 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.678951 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.682341 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-scripts\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.691098 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.697111 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.706821 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.708809 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.715457 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.722755 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqfcr\" (UniqueName: \"kubernetes.io/projected/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-kube-api-access-gqfcr\") pod \"cinder-scheduler-0\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.746647 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.775248 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blqxb\" (UniqueName: \"kubernetes.io/projected/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-kube-api-access-blqxb\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.775518 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data-custom\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.775659 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.775734 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-swift-storage-0\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.775801 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.775866 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-logs\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.775979 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-config\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.776057 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.776130 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-scripts\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.776243 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz9z6\" (UniqueName: \"kubernetes.io/projected/bc80c43e-82e4-44ae-a948-eb29150af2ea-kube-api-access-qz9z6\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.776332 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-svc\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.776406 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.776472 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.777382 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.777992 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-swift-storage-0\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.778799 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.779612 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-config\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" 
(UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.780458 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-svc\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.801329 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz9z6\" (UniqueName: \"kubernetes.io/projected/bc80c43e-82e4-44ae-a948-eb29150af2ea-kube-api-access-qz9z6\") pod \"dnsmasq-dns-6b8db6db6f-5dhp2\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.811669 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.877572 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-logs\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.877651 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.877682 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-scripts\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.877738 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.877801 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blqxb\" (UniqueName: \"kubernetes.io/projected/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-kube-api-access-blqxb\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.877825 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data-custom\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.877852 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 
05:44:39.878094 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-logs\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.879142 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-etc-machine-id\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.885540 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.885919 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.900697 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data-custom\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.910887 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-scripts\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.918508 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blqxb\" (UniqueName: \"kubernetes.io/projected/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-kube-api-access-blqxb\") pod \"cinder-api-0\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " pod="openstack/cinder-api-0" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.923876 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:39 crc kubenswrapper[4871]: I1126 05:44:39.987285 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.004816 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-9kf7b" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.084428 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-db-sync-config-data\") pod \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.084549 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t92vp\" (UniqueName: \"kubernetes.io/projected/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-kube-api-access-t92vp\") pod \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.084948 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-config-data\") pod \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.085023 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-combined-ca-bundle\") pod \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\" (UID: \"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6\") " Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.091167 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6" (UID: "b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.091499 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-kube-api-access-t92vp" (OuterVolumeSpecName: "kube-api-access-t92vp") pod "b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6" (UID: "b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6"). InnerVolumeSpecName "kube-api-access-t92vp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.117836 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.148445 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6" (UID: "b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.187592 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.187617 4871 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.187660 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t92vp\" (UniqueName: \"kubernetes.io/projected/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-kube-api-access-t92vp\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.197425 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.207341 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-config-data" (OuterVolumeSpecName: "config-data") pod "b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6" (UID: "b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.211892 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.237196 4871 generic.go:334] "Generic (PLEG): container finished" podID="906807e1-f724-4ab4-9ccc-95656188890e" containerID="9e2d82019cfae3a801741429cd76d41e71f620675230c40d3af434b0678c5b24" exitCode=1 Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.237271 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"906807e1-f724-4ab4-9ccc-95656188890e","Type":"ContainerDied","Data":"9e2d82019cfae3a801741429cd76d41e71f620675230c40d3af434b0678c5b24"} Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.237301 4871 scope.go:117] "RemoveContainer" containerID="e76e1839b021122d94130fc2fe31234ee90db31804ad29bff7d87b4569898f52" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.237964 4871 scope.go:117] "RemoveContainer" containerID="9e2d82019cfae3a801741429cd76d41e71f620675230c40d3af434b0678c5b24" Nov 26 05:44:40 crc kubenswrapper[4871]: E1126 05:44:40.238169 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(906807e1-f724-4ab4-9ccc-95656188890e)\"" pod="openstack/watcher-decision-engine-0" podUID="906807e1-f724-4ab4-9ccc-95656188890e" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.290591 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.294538 4871 generic.go:334] "Generic (PLEG): container finished" podID="cd5769df-df2d-4456-971b-3050ac8cc37c" containerID="05479995604ea1c6b9fc3d4aecdaf6fdb8d17fe75cc495fd22daaaac79cdc6c6" exitCode=0 
Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.294623 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" event={"ID":"cd5769df-df2d-4456-971b-3050ac8cc37c","Type":"ContainerDied","Data":"05479995604ea1c6b9fc3d4aecdaf6fdb8d17fe75cc495fd22daaaac79cdc6c6"} Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.304632 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9kf7b" event={"ID":"b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6","Type":"ContainerDied","Data":"1ed612055e88b29dab36966117aad22fba1a95c3bb1162a284e2924b700069af"} Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.304669 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ed612055e88b29dab36966117aad22fba1a95c3bb1162a284e2924b700069af" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.304723 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-9kf7b" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.361669 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.503709 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-nb\") pod \"cd5769df-df2d-4456-971b-3050ac8cc37c\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.503759 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-svc\") pod \"cd5769df-df2d-4456-971b-3050ac8cc37c\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.503811 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-config\") pod \"cd5769df-df2d-4456-971b-3050ac8cc37c\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.503834 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-sb\") pod \"cd5769df-df2d-4456-971b-3050ac8cc37c\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.503920 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-swift-storage-0\") pod \"cd5769df-df2d-4456-971b-3050ac8cc37c\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.503969 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cs5p\" (UniqueName: \"kubernetes.io/projected/cd5769df-df2d-4456-971b-3050ac8cc37c-kube-api-access-7cs5p\") pod \"cd5769df-df2d-4456-971b-3050ac8cc37c\" (UID: \"cd5769df-df2d-4456-971b-3050ac8cc37c\") " Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.512100 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd5769df-df2d-4456-971b-3050ac8cc37c-kube-api-access-7cs5p" 
(OuterVolumeSpecName: "kube-api-access-7cs5p") pod "cd5769df-df2d-4456-971b-3050ac8cc37c" (UID: "cd5769df-df2d-4456-971b-3050ac8cc37c"). InnerVolumeSpecName "kube-api-access-7cs5p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.618728 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cs5p\" (UniqueName: \"kubernetes.io/projected/cd5769df-df2d-4456-971b-3050ac8cc37c-kube-api-access-7cs5p\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.657286 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cd5769df-df2d-4456-971b-3050ac8cc37c" (UID: "cd5769df-df2d-4456-971b-3050ac8cc37c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.690196 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cd5769df-df2d-4456-971b-3050ac8cc37c" (UID: "cd5769df-df2d-4456-971b-3050ac8cc37c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.695659 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.713905 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "cd5769df-df2d-4456-971b-3050ac8cc37c" (UID: "cd5769df-df2d-4456-971b-3050ac8cc37c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.717180 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8db6db6f-5dhp2"] Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.718963 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cd5769df-df2d-4456-971b-3050ac8cc37c" (UID: "cd5769df-df2d-4456-971b-3050ac8cc37c"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.721101 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.749977 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.750007 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.750018 4871 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.765937 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-config" (OuterVolumeSpecName: "config") pod "cd5769df-df2d-4456-971b-3050ac8cc37c" (UID: "cd5769df-df2d-4456-971b-3050ac8cc37c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.800677 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75958fc765-m7jqm"] Nov 26 05:44:40 crc kubenswrapper[4871]: E1126 05:44:40.801096 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd5769df-df2d-4456-971b-3050ac8cc37c" containerName="init" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.801108 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd5769df-df2d-4456-971b-3050ac8cc37c" containerName="init" Nov 26 05:44:40 crc kubenswrapper[4871]: E1126 05:44:40.801134 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd5769df-df2d-4456-971b-3050ac8cc37c" containerName="dnsmasq-dns" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.801140 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd5769df-df2d-4456-971b-3050ac8cc37c" containerName="dnsmasq-dns" Nov 26 05:44:40 crc kubenswrapper[4871]: E1126 05:44:40.801152 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6" containerName="glance-db-sync" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.801158 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6" containerName="glance-db-sync" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.801342 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6" containerName="glance-db-sync" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.801357 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd5769df-df2d-4456-971b-3050ac8cc37c" containerName="dnsmasq-dns" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.802328 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.855962 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-config\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.856052 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgs84\" (UniqueName: \"kubernetes.io/projected/d2bb32df-62a8-451e-a469-464d621a12d2-kube-api-access-pgs84\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.856085 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-svc\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.856111 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-sb\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.856144 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-nb\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.856185 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-swift-storage-0\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.856253 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd5769df-df2d-4456-971b-3050ac8cc37c-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.864280 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75958fc765-m7jqm"] Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.982768 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8db6db6f-5dhp2"] Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.991832 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgs84\" (UniqueName: \"kubernetes.io/projected/d2bb32df-62a8-451e-a469-464d621a12d2-kube-api-access-pgs84\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 
05:44:40.991915 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-svc\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.991939 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-sb\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.991987 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-nb\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.992049 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-swift-storage-0\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.992149 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-config\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.993060 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-config\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.993289 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-sb\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.993606 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-nb\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.994083 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-svc\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:40 crc kubenswrapper[4871]: I1126 05:44:40.994120 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-swift-storage-0\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.020855 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgs84\" (UniqueName: \"kubernetes.io/projected/d2bb32df-62a8-451e-a469-464d621a12d2-kube-api-access-pgs84\") pod \"dnsmasq-dns-75958fc765-m7jqm\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.065659 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.193501 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.318244 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bfcbd2ac-54bd-46ea-8bed-81093bee3d65","Type":"ContainerStarted","Data":"573f16342110d11521b725fc9fd0d4d838ea3bd24dc0cef0214aedffa7a4494e"} Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.334842 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" event={"ID":"bc80c43e-82e4-44ae-a948-eb29150af2ea","Type":"ContainerStarted","Data":"614c77b64c2fac32b4a439bc728cdb8bec25e2c38055d65cf1fedec0c73e8b9d"} Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.338136 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" event={"ID":"cd5769df-df2d-4456-971b-3050ac8cc37c","Type":"ContainerDied","Data":"27d59b016415f935a4c28911fa649b6475aff933357093d19ee9aa2326ffb652"} Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.338181 4871 scope.go:117] "RemoveContainer" containerID="05479995604ea1c6b9fc3d4aecdaf6fdb8d17fe75cc495fd22daaaac79cdc6c6" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.338326 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764bcc8bff-4h264" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.385251 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764bcc8bff-4h264"] Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.389604 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-764bcc8bff-4h264"] Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.440676 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.441140 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.472626 4871 scope.go:117] "RemoveContainer" containerID="55695ba9f59bebb90443923c59e3d2a17e8ecc12ddd55e2996ff78165ae6e7e2" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.553597 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.555256 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.562778 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.562922 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.563027 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2br48" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.566402 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.612592 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.612672 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-config-data\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.612741 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.612771 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-scripts\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.612832 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.612852 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-logs\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.612882 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ll67v\" (UniqueName: \"kubernetes.io/projected/bf64ca91-29f2-4c59-9010-155bd92559aa-kube-api-access-ll67v\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " 
pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.714902 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.714960 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-config-data\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.715027 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.715062 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-scripts\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.715124 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.715142 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-logs\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.715165 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ll67v\" (UniqueName: \"kubernetes.io/projected/bf64ca91-29f2-4c59-9010-155bd92559aa-kube-api-access-ll67v\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.717915 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.718844 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-logs\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.719489 4871 operation_generator.go:580] 
"MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.728743 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75958fc765-m7jqm"] Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.728752 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.739560 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ll67v\" (UniqueName: \"kubernetes.io/projected/bf64ca91-29f2-4c59-9010-155bd92559aa-kube-api-access-ll67v\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.740546 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-config-data\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.783019 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-scripts\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.878714 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:41 crc kubenswrapper[4871]: I1126 05:44:41.922301 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.039134 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.044459 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.053848 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.055829 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.226572 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7tq2\" (UniqueName: \"kubernetes.io/projected/48330a72-b80c-44bc-be03-c34038434964-kube-api-access-w7tq2\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.226897 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-config-data\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.226926 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.226963 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.226983 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-scripts\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.227001 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.227074 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-logs\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.328258 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " 
pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.328312 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-scripts\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.328343 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.328449 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-logs\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.328454 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.329887 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7tq2\" (UniqueName: \"kubernetes.io/projected/48330a72-b80c-44bc-be03-c34038434964-kube-api-access-w7tq2\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.329953 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-config-data\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.329988 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.342646 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.348026 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc 
kubenswrapper[4871]: I1126 05:44:42.351077 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-scripts\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.356007 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-logs\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.369194 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7tq2\" (UniqueName: \"kubernetes.io/projected/48330a72-b80c-44bc-be03-c34038434964-kube-api-access-w7tq2\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.398365 4871 generic.go:334] "Generic (PLEG): container finished" podID="bc80c43e-82e4-44ae-a948-eb29150af2ea" containerID="a5728191bee61cf58aec8460dd1fdcce2fdae7dc625ccf7466f200df9005e333" exitCode=0 Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.398441 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" event={"ID":"bc80c43e-82e4-44ae-a948-eb29150af2ea","Type":"ContainerDied","Data":"a5728191bee61cf58aec8460dd1fdcce2fdae7dc625ccf7466f200df9005e333"} Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.399268 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-config-data\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.419690 4871 generic.go:334] "Generic (PLEG): container finished" podID="d2bb32df-62a8-451e-a469-464d621a12d2" containerID="bfcdcb7d37158281d937ab83f18bc6ea078af9a240958799b86ca5ada3dce012" exitCode=0 Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.419757 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" event={"ID":"d2bb32df-62a8-451e-a469-464d621a12d2","Type":"ContainerDied","Data":"bfcdcb7d37158281d937ab83f18bc6ea078af9a240958799b86ca5ada3dce012"} Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.419781 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" event={"ID":"d2bb32df-62a8-451e-a469-464d621a12d2","Type":"ContainerStarted","Data":"ca2ddd24dc51a5a55a3ce71cc9e024b1b182f6ce8d94c77ddc7d039e024e152e"} Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.427641 4871 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.428581 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1eca7326-5c90-4ff3-9ccb-879d01ed3e22","Type":"ContainerStarted","Data":"4aa9e58add8c6c1c5f0476ff5733deb8b6f99d917e490cecabdc60c3bb28cc4c"} Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.429284 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.484379 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/watcher-api-0" podUID="154bc562-d8d8-4608-8973-66b427a4f98f" containerName="watcher-api-log" probeResult="failure" output="Get \"https://10.217.0.175:9322/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.578751 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd5769df-df2d-4456-971b-3050ac8cc37c" path="/var/lib/kubelet/pods/cd5769df-df2d-4456-971b-3050ac8cc37c/volumes" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.665749 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.679064 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 05:44:42 crc kubenswrapper[4871]: I1126 05:44:42.734386 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:44:42 crc kubenswrapper[4871]: W1126 05:44:42.762853 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf64ca91_29f2_4c59_9010_155bd92559aa.slice/crio-836f07506af9cf587ca19bb486d77def75ae0af8c3296b74f77402f4c935134f WatchSource:0}: Error finding container 836f07506af9cf587ca19bb486d77def75ae0af8c3296b74f77402f4c935134f: Status 404 returned error can't find the container with id 836f07506af9cf587ca19bb486d77def75ae0af8c3296b74f77402f4c935134f Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.127458 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.127738 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.289735 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-config\") pod \"bc80c43e-82e4-44ae-a948-eb29150af2ea\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.290016 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-sb\") pod \"bc80c43e-82e4-44ae-a948-eb29150af2ea\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.290060 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-swift-storage-0\") pod \"bc80c43e-82e4-44ae-a948-eb29150af2ea\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.290091 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qz9z6\" (UniqueName: \"kubernetes.io/projected/bc80c43e-82e4-44ae-a948-eb29150af2ea-kube-api-access-qz9z6\") pod \"bc80c43e-82e4-44ae-a948-eb29150af2ea\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.290126 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-nb\") pod \"bc80c43e-82e4-44ae-a948-eb29150af2ea\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.290157 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-svc\") pod \"bc80c43e-82e4-44ae-a948-eb29150af2ea\" (UID: \"bc80c43e-82e4-44ae-a948-eb29150af2ea\") " Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.337200 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc80c43e-82e4-44ae-a948-eb29150af2ea-kube-api-access-qz9z6" (OuterVolumeSpecName: "kube-api-access-qz9z6") pod "bc80c43e-82e4-44ae-a948-eb29150af2ea" (UID: "bc80c43e-82e4-44ae-a948-eb29150af2ea"). InnerVolumeSpecName "kube-api-access-qz9z6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.405211 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qz9z6\" (UniqueName: \"kubernetes.io/projected/bc80c43e-82e4-44ae-a948-eb29150af2ea-kube-api-access-qz9z6\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.432691 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.461629 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bfcbd2ac-54bd-46ea-8bed-81093bee3d65","Type":"ContainerStarted","Data":"45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df"} Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.472721 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.473699 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8db6db6f-5dhp2" event={"ID":"bc80c43e-82e4-44ae-a948-eb29150af2ea","Type":"ContainerDied","Data":"614c77b64c2fac32b4a439bc728cdb8bec25e2c38055d65cf1fedec0c73e8b9d"} Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.473839 4871 scope.go:117] "RemoveContainer" containerID="a5728191bee61cf58aec8460dd1fdcce2fdae7dc625ccf7466f200df9005e333" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.488765 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bf64ca91-29f2-4c59-9010-155bd92559aa","Type":"ContainerStarted","Data":"836f07506af9cf587ca19bb486d77def75ae0af8c3296b74f77402f4c935134f"} Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.498803 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" event={"ID":"d2bb32df-62a8-451e-a469-464d621a12d2","Type":"ContainerStarted","Data":"b8e969a5d5e59b0221edc667450c8de831c8bdee920d320a70eaedfccc530bcc"} Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.498884 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.520205 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-config" (OuterVolumeSpecName: "config") pod "bc80c43e-82e4-44ae-a948-eb29150af2ea" (UID: "bc80c43e-82e4-44ae-a948-eb29150af2ea"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.546682 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.552391 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" podStartSLOduration=3.552367819 podStartE2EDuration="3.552367819s" podCreationTimestamp="2025-11-26 05:44:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:43.527323257 +0000 UTC m=+1141.710374843" watchObservedRunningTime="2025-11-26 05:44:43.552367819 +0000 UTC m=+1141.735419405" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.557131 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bc80c43e-82e4-44ae-a948-eb29150af2ea" (UID: "bc80c43e-82e4-44ae-a948-eb29150af2ea"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.612744 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.615711 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bc80c43e-82e4-44ae-a948-eb29150af2ea" (UID: "bc80c43e-82e4-44ae-a948-eb29150af2ea"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.617061 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.619097 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bc80c43e-82e4-44ae-a948-eb29150af2ea" (UID: "bc80c43e-82e4-44ae-a948-eb29150af2ea"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.621156 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bc80c43e-82e4-44ae-a948-eb29150af2ea" (UID: "bc80c43e-82e4-44ae-a948-eb29150af2ea"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.710115 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7cbf6bc784-rm6hn" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.721206 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.721233 4871 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.721242 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bc80c43e-82e4-44ae-a948-eb29150af2ea-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.804388 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8665945b44-wbcwv"] Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.912325 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8db6db6f-5dhp2"] Nov 26 05:44:43 crc kubenswrapper[4871]: I1126 05:44:43.921718 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b8db6db6f-5dhp2"] Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.152656 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.152688 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.152697 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.153368 4871 scope.go:117] "RemoveContainer" containerID="9e2d82019cfae3a801741429cd76d41e71f620675230c40d3af434b0678c5b24" Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.153371 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 26 05:44:44 crc kubenswrapper[4871]: E1126 05:44:44.153581 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(906807e1-f724-4ab4-9ccc-95656188890e)\"" pod="openstack/watcher-decision-engine-0" podUID="906807e1-f724-4ab4-9ccc-95656188890e" Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.506402 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.570745 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc80c43e-82e4-44ae-a948-eb29150af2ea" path="/var/lib/kubelet/pods/bc80c43e-82e4-44ae-a948-eb29150af2ea/volumes" Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.606158 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"bf64ca91-29f2-4c59-9010-155bd92559aa","Type":"ContainerStarted","Data":"a1bee00c5798a50238bb7af9d9709554171477c19e620ebf30e1eab38960dc87"} Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.611357 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"48330a72-b80c-44bc-be03-c34038434964","Type":"ContainerStarted","Data":"483e8ad9c97500671ed4ec2f255c4c996946e8fc62ddd97ef9973ffcbf95a10e"} Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.615027 4871 scope.go:117] "RemoveContainer" containerID="9e2d82019cfae3a801741429cd76d41e71f620675230c40d3af434b0678c5b24" Nov 26 05:44:44 crc kubenswrapper[4871]: E1126 05:44:44.615285 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 10s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(906807e1-f724-4ab4-9ccc-95656188890e)\"" pod="openstack/watcher-decision-engine-0" podUID="906807e1-f724-4ab4-9ccc-95656188890e" Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.615326 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1eca7326-5c90-4ff3-9ccb-879d01ed3e22","Type":"ContainerStarted","Data":"e72bd50d2175ea2f7ddb71f5321525ba8522689c69b9ecb37d0a08cb8296e583"} Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.615799 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-8665945b44-wbcwv" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon-log" containerID="cri-o://2f753b791176f79d2770b9ecafcbce795dfc8ecb07673c67a3c4df5b21ef9c16" gracePeriod=30 Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.616201 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-8665945b44-wbcwv" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon" containerID="cri-o://c136384f154b1d112425776e2afc3e7ec4248dce8ee44c4fa5505218badfb75c" gracePeriod=30 Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.897286 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:44 crc kubenswrapper[4871]: I1126 05:44:44.941439 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.626836 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"48330a72-b80c-44bc-be03-c34038434964","Type":"ContainerStarted","Data":"40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092"} Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.633844 4871 generic.go:334] "Generic (PLEG): container finished" podID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerID="c136384f154b1d112425776e2afc3e7ec4248dce8ee44c4fa5505218badfb75c" exitCode=0 Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.633925 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8665945b44-wbcwv" event={"ID":"812fa0f1-c216-4db1-b3e6-cfa862b8cb93","Type":"ContainerDied","Data":"c136384f154b1d112425776e2afc3e7ec4248dce8ee44c4fa5505218badfb75c"} Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.637239 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"1eca7326-5c90-4ff3-9ccb-879d01ed3e22","Type":"ContainerStarted","Data":"4d592b97a83066c6f0386dd216b64e6b5ad27d1356f76eee2a289b90a1a90158"} Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.637429 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.637443 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1eca7326-5c90-4ff3-9ccb-879d01ed3e22" containerName="cinder-api" containerID="cri-o://4d592b97a83066c6f0386dd216b64e6b5ad27d1356f76eee2a289b90a1a90158" gracePeriod=30 Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.637484 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="1eca7326-5c90-4ff3-9ccb-879d01ed3e22" containerName="cinder-api-log" containerID="cri-o://e72bd50d2175ea2f7ddb71f5321525ba8522689c69b9ecb37d0a08cb8296e583" gracePeriod=30 Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.640958 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bfcbd2ac-54bd-46ea-8bed-81093bee3d65","Type":"ContainerStarted","Data":"6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73"} Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.643964 4871 generic.go:334] "Generic (PLEG): container finished" podID="3a392cdb-377e-4047-a1f4-f190429fe076" containerID="ae8ac99537258d7df8927e67f7ad624c60d74e466377d617ef3ff08389e1e3fd" exitCode=137 Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.644000 4871 generic.go:334] "Generic (PLEG): container finished" podID="3a392cdb-377e-4047-a1f4-f190429fe076" containerID="f5481d814a3efcc61a3330ac7e4e3e5f3899e1e7c4082c57528613cb9400a3cd" exitCode=137 Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.644047 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f98f67b9-25fgx" event={"ID":"3a392cdb-377e-4047-a1f4-f190429fe076","Type":"ContainerDied","Data":"ae8ac99537258d7df8927e67f7ad624c60d74e466377d617ef3ff08389e1e3fd"} Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.644075 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f98f67b9-25fgx" event={"ID":"3a392cdb-377e-4047-a1f4-f190429fe076","Type":"ContainerDied","Data":"f5481d814a3efcc61a3330ac7e4e3e5f3899e1e7c4082c57528613cb9400a3cd"} Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.648448 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bf64ca91-29f2-4c59-9010-155bd92559aa","Type":"ContainerStarted","Data":"cda44fc22a31a318115b323e855b216c5bd22e374baaab577bd79a79c068721b"} Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.665976 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.665953935 podStartE2EDuration="6.665953935s" podCreationTimestamp="2025-11-26 05:44:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:45.65448239 +0000 UTC m=+1143.837533976" watchObservedRunningTime="2025-11-26 05:44:45.665953935 +0000 UTC m=+1143.849005521" Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.670335 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6b4f49568b-znxq7" Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.697709 
4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.697692384 podStartE2EDuration="5.697692384s" podCreationTimestamp="2025-11-26 05:44:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:45.678988759 +0000 UTC m=+1143.862040345" watchObservedRunningTime="2025-11-26 05:44:45.697692384 +0000 UTC m=+1143.880743970" Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.724271 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.903340486 podStartE2EDuration="6.724253563s" podCreationTimestamp="2025-11-26 05:44:39 +0000 UTC" firstStartedPulling="2025-11-26 05:44:40.77372725 +0000 UTC m=+1138.956778836" lastFinishedPulling="2025-11-26 05:44:41.594640327 +0000 UTC m=+1139.777691913" observedRunningTime="2025-11-26 05:44:45.713399744 +0000 UTC m=+1143.896451330" watchObservedRunningTime="2025-11-26 05:44:45.724253563 +0000 UTC m=+1143.907305149" Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.786548 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5995b75f66-tdlhq"] Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.786765 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5995b75f66-tdlhq" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api-log" containerID="cri-o://7d77946b042e750ca6623b9dcba07756afad16d8a765a2519256ae2c396e6fa6" gracePeriod=30 Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.787109 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5995b75f66-tdlhq" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api" containerID="cri-o://95a1257b7b87c0c6f5e3a60407c7428ca2153e4f024c94894b20f9aead01443b" gracePeriod=30 Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.797914 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5995b75f66-tdlhq" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.174:9311/healthcheck\": EOF" Nov 26 05:44:45 crc kubenswrapper[4871]: I1126 05:44:45.798039 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5995b75f66-tdlhq" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.174:9311/healthcheck\": EOF" Nov 26 05:44:46 crc kubenswrapper[4871]: I1126 05:44:46.117659 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:44:46 crc kubenswrapper[4871]: I1126 05:44:46.196402 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 05:44:46 crc kubenswrapper[4871]: I1126 05:44:46.666938 4871 generic.go:334] "Generic (PLEG): container finished" podID="1eca7326-5c90-4ff3-9ccb-879d01ed3e22" containerID="4d592b97a83066c6f0386dd216b64e6b5ad27d1356f76eee2a289b90a1a90158" exitCode=0 Nov 26 05:44:46 crc kubenswrapper[4871]: I1126 05:44:46.666970 4871 generic.go:334] "Generic (PLEG): container finished" podID="1eca7326-5c90-4ff3-9ccb-879d01ed3e22" containerID="e72bd50d2175ea2f7ddb71f5321525ba8522689c69b9ecb37d0a08cb8296e583" exitCode=143 Nov 26 05:44:46 crc kubenswrapper[4871]: 
I1126 05:44:46.667036 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1eca7326-5c90-4ff3-9ccb-879d01ed3e22","Type":"ContainerDied","Data":"4d592b97a83066c6f0386dd216b64e6b5ad27d1356f76eee2a289b90a1a90158"} Nov 26 05:44:46 crc kubenswrapper[4871]: I1126 05:44:46.667111 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1eca7326-5c90-4ff3-9ccb-879d01ed3e22","Type":"ContainerDied","Data":"e72bd50d2175ea2f7ddb71f5321525ba8522689c69b9ecb37d0a08cb8296e583"} Nov 26 05:44:46 crc kubenswrapper[4871]: I1126 05:44:46.677109 4871 generic.go:334] "Generic (PLEG): container finished" podID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerID="7d77946b042e750ca6623b9dcba07756afad16d8a765a2519256ae2c396e6fa6" exitCode=143 Nov 26 05:44:46 crc kubenswrapper[4871]: I1126 05:44:46.678158 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5995b75f66-tdlhq" event={"ID":"39bbf335-d8f3-41f0-84e8-b9589ff1b60c","Type":"ContainerDied","Data":"7d77946b042e750ca6623b9dcba07756afad16d8a765a2519256ae2c396e6fa6"} Nov 26 05:44:46 crc kubenswrapper[4871]: I1126 05:44:46.971493 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8665945b44-wbcwv" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.163:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.163:8443: connect: connection refused" Nov 26 05:44:47 crc kubenswrapper[4871]: I1126 05:44:47.687665 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bf64ca91-29f2-4c59-9010-155bd92559aa" containerName="glance-log" containerID="cri-o://a1bee00c5798a50238bb7af9d9709554171477c19e620ebf30e1eab38960dc87" gracePeriod=30 Nov 26 05:44:47 crc kubenswrapper[4871]: I1126 05:44:47.688285 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bf64ca91-29f2-4c59-9010-155bd92559aa" containerName="glance-httpd" containerID="cri-o://cda44fc22a31a318115b323e855b216c5bd22e374baaab577bd79a79c068721b" gracePeriod=30 Nov 26 05:44:47 crc kubenswrapper[4871]: I1126 05:44:47.710356 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:47 crc kubenswrapper[4871]: I1126 05:44:47.730338 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5566bf8457-7qhhj" Nov 26 05:44:47 crc kubenswrapper[4871]: I1126 05:44:47.782448 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-69c5fdfb8b-95x78"] Nov 26 05:44:47 crc kubenswrapper[4871]: I1126 05:44:47.783069 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-69c5fdfb8b-95x78" podUID="1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" containerName="neutron-api" containerID="cri-o://6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1" gracePeriod=30 Nov 26 05:44:47 crc kubenswrapper[4871]: I1126 05:44:47.783497 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-69c5fdfb8b-95x78" podUID="1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" containerName="neutron-httpd" containerID="cri-o://4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749" gracePeriod=30 Nov 26 05:44:48 crc kubenswrapper[4871]: I1126 05:44:48.716681 4871 generic.go:334] "Generic 
(PLEG): container finished" podID="bf64ca91-29f2-4c59-9010-155bd92559aa" containerID="cda44fc22a31a318115b323e855b216c5bd22e374baaab577bd79a79c068721b" exitCode=0 Nov 26 05:44:48 crc kubenswrapper[4871]: I1126 05:44:48.716723 4871 generic.go:334] "Generic (PLEG): container finished" podID="bf64ca91-29f2-4c59-9010-155bd92559aa" containerID="a1bee00c5798a50238bb7af9d9709554171477c19e620ebf30e1eab38960dc87" exitCode=143 Nov 26 05:44:48 crc kubenswrapper[4871]: I1126 05:44:48.716746 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bf64ca91-29f2-4c59-9010-155bd92559aa","Type":"ContainerDied","Data":"cda44fc22a31a318115b323e855b216c5bd22e374baaab577bd79a79c068721b"} Nov 26 05:44:48 crc kubenswrapper[4871]: I1126 05:44:48.716786 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bf64ca91-29f2-4c59-9010-155bd92559aa","Type":"ContainerDied","Data":"a1bee00c5798a50238bb7af9d9709554171477c19e620ebf30e1eab38960dc87"} Nov 26 05:44:48 crc kubenswrapper[4871]: I1126 05:44:48.721539 4871 generic.go:334] "Generic (PLEG): container finished" podID="1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" containerID="4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749" exitCode=0 Nov 26 05:44:48 crc kubenswrapper[4871]: I1126 05:44:48.721557 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69c5fdfb8b-95x78" event={"ID":"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda","Type":"ContainerDied","Data":"4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749"} Nov 26 05:44:49 crc kubenswrapper[4871]: I1126 05:44:49.718969 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5995b75f66-tdlhq" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.174:9311/healthcheck\": dial tcp 10.217.0.174:9311: connect: connection refused" Nov 26 05:44:49 crc kubenswrapper[4871]: I1126 05:44:49.719068 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5995b75f66-tdlhq" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.174:9311/healthcheck\": dial tcp 10.217.0.174:9311: connect: connection refused" Nov 26 05:44:49 crc kubenswrapper[4871]: I1126 05:44:49.732038 4871 generic.go:334] "Generic (PLEG): container finished" podID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerID="95a1257b7b87c0c6f5e3a60407c7428ca2153e4f024c94894b20f9aead01443b" exitCode=0 Nov 26 05:44:49 crc kubenswrapper[4871]: I1126 05:44:49.732078 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5995b75f66-tdlhq" event={"ID":"39bbf335-d8f3-41f0-84e8-b9589ff1b60c","Type":"ContainerDied","Data":"95a1257b7b87c0c6f5e3a60407c7428ca2153e4f024c94894b20f9aead01443b"} Nov 26 05:44:49 crc kubenswrapper[4871]: I1126 05:44:49.813082 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 05:44:49 crc kubenswrapper[4871]: I1126 05:44:49.935154 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 05:44:50 crc kubenswrapper[4871]: I1126 05:44:50.795180 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 05:44:51 crc kubenswrapper[4871]: I1126 05:44:51.067707 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:44:51 crc kubenswrapper[4871]: I1126 05:44:51.153203 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7648c6b969-hsbsf"] Nov 26 05:44:51 crc kubenswrapper[4871]: I1126 05:44:51.159755 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" podUID="196ce4b4-28af-4295-b20e-4d1cfa847b27" containerName="dnsmasq-dns" containerID="cri-o://6a1be64c982f17ed763f56f7483ff8f43ad84ec11a54f1b3259d818e95bd917b" gracePeriod=10 Nov 26 05:44:51 crc kubenswrapper[4871]: I1126 05:44:51.454866 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Nov 26 05:44:51 crc kubenswrapper[4871]: I1126 05:44:51.461392 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 26 05:44:51 crc kubenswrapper[4871]: I1126 05:44:51.757145 4871 generic.go:334] "Generic (PLEG): container finished" podID="196ce4b4-28af-4295-b20e-4d1cfa847b27" containerID="6a1be64c982f17ed763f56f7483ff8f43ad84ec11a54f1b3259d818e95bd917b" exitCode=0 Nov 26 05:44:51 crc kubenswrapper[4871]: I1126 05:44:51.757212 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" event={"ID":"196ce4b4-28af-4295-b20e-4d1cfa847b27","Type":"ContainerDied","Data":"6a1be64c982f17ed763f56f7483ff8f43ad84ec11a54f1b3259d818e95bd917b"} Nov 26 05:44:51 crc kubenswrapper[4871]: I1126 05:44:51.757803 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="bfcbd2ac-54bd-46ea-8bed-81093bee3d65" containerName="cinder-scheduler" containerID="cri-o://45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df" gracePeriod=30 Nov 26 05:44:51 crc kubenswrapper[4871]: I1126 05:44:51.757907 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="bfcbd2ac-54bd-46ea-8bed-81093bee3d65" containerName="probe" containerID="cri-o://6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73" gracePeriod=30 Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.416633 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.535956 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-etc-machine-id\") pod \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.536051 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-scripts\") pod \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.536162 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-combined-ca-bundle\") pod \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.536205 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blqxb\" (UniqueName: \"kubernetes.io/projected/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-kube-api-access-blqxb\") pod \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.536251 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data-custom\") pod \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.536365 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-logs\") pod \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.536465 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data\") pod \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\" (UID: \"1eca7326-5c90-4ff3-9ccb-879d01ed3e22\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.537214 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1eca7326-5c90-4ff3-9ccb-879d01ed3e22" (UID: "1eca7326-5c90-4ff3-9ccb-879d01ed3e22"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.537691 4871 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.538974 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-logs" (OuterVolumeSpecName: "logs") pod "1eca7326-5c90-4ff3-9ccb-879d01ed3e22" (UID: "1eca7326-5c90-4ff3-9ccb-879d01ed3e22"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.550457 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1eca7326-5c90-4ff3-9ccb-879d01ed3e22" (UID: "1eca7326-5c90-4ff3-9ccb-879d01ed3e22"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.551935 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-scripts" (OuterVolumeSpecName: "scripts") pod "1eca7326-5c90-4ff3-9ccb-879d01ed3e22" (UID: "1eca7326-5c90-4ff3-9ccb-879d01ed3e22"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.552177 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-kube-api-access-blqxb" (OuterVolumeSpecName: "kube-api-access-blqxb") pod "1eca7326-5c90-4ff3-9ccb-879d01ed3e22" (UID: "1eca7326-5c90-4ff3-9ccb-879d01ed3e22"). InnerVolumeSpecName "kube-api-access-blqxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.598705 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1eca7326-5c90-4ff3-9ccb-879d01ed3e22" (UID: "1eca7326-5c90-4ff3-9ccb-879d01ed3e22"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.639779 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.639805 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.639814 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.639824 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blqxb\" (UniqueName: \"kubernetes.io/projected/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-kube-api-access-blqxb\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.639835 4871 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.677287 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data" (OuterVolumeSpecName: "config-data") pod "1eca7326-5c90-4ff3-9ccb-879d01ed3e22" (UID: "1eca7326-5c90-4ff3-9ccb-879d01ed3e22"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.741909 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1eca7326-5c90-4ff3-9ccb-879d01ed3e22-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.777761 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.784471 4871 generic.go:334] "Generic (PLEG): container finished" podID="bfcbd2ac-54bd-46ea-8bed-81093bee3d65" containerID="6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73" exitCode=0 Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.811405 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f98f67b9-25fgx" event={"ID":"3a392cdb-377e-4047-a1f4-f190429fe076","Type":"ContainerDied","Data":"c028e4e4ac4123436009cc22f9edb7f403a4f9a7d4454c859a70e38a86ed71de"} Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.811445 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c028e4e4ac4123436009cc22f9edb7f403a4f9a7d4454c859a70e38a86ed71de" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.811458 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bf64ca91-29f2-4c59-9010-155bd92559aa","Type":"ContainerDied","Data":"836f07506af9cf587ca19bb486d77def75ae0af8c3296b74f77402f4c935134f"} Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.811470 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="836f07506af9cf587ca19bb486d77def75ae0af8c3296b74f77402f4c935134f" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.811484 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"1eca7326-5c90-4ff3-9ccb-879d01ed3e22","Type":"ContainerDied","Data":"4aa9e58add8c6c1c5f0476ff5733deb8b6f99d917e490cecabdc60c3bb28cc4c"} Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.811500 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bfcbd2ac-54bd-46ea-8bed-81093bee3d65","Type":"ContainerDied","Data":"6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73"} Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.811511 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5995b75f66-tdlhq" event={"ID":"39bbf335-d8f3-41f0-84e8-b9589ff1b60c","Type":"ContainerDied","Data":"b37f3809ef3b5cc09bb4e4f5083a0655282079ae210f31289a2d0e69e7a5f0cf"} Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.811537 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b37f3809ef3b5cc09bb4e4f5083a0655282079ae210f31289a2d0e69e7a5f0cf" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.811554 4871 scope.go:117] "RemoveContainer" containerID="4d592b97a83066c6f0386dd216b64e6b5ad27d1356f76eee2a289b90a1a90158" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.828089 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.855457 4871 scope.go:117] "RemoveContainer" containerID="e72bd50d2175ea2f7ddb71f5321525ba8522689c69b9ecb37d0a08cb8296e583" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.880811 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.899743 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.911643 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67f98f67b9-25fgx" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.913608 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.928286 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.945456 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-scripts\") pod \"bf64ca91-29f2-4c59-9010-155bd92559aa\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.945561 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-config-data\") pod \"bf64ca91-29f2-4c59-9010-155bd92559aa\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.945591 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-combined-ca-bundle\") pod \"bf64ca91-29f2-4c59-9010-155bd92559aa\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.945648 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"bf64ca91-29f2-4c59-9010-155bd92559aa\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.945718 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-logs\") pod \"bf64ca91-29f2-4c59-9010-155bd92559aa\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.945789 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ll67v\" (UniqueName: \"kubernetes.io/projected/bf64ca91-29f2-4c59-9010-155bd92559aa-kube-api-access-ll67v\") pod \"bf64ca91-29f2-4c59-9010-155bd92559aa\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.945811 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-httpd-run\") pod \"bf64ca91-29f2-4c59-9010-155bd92559aa\" (UID: \"bf64ca91-29f2-4c59-9010-155bd92559aa\") " Nov 26 05:44:52 crc 
kubenswrapper[4871]: I1126 05:44:52.946789 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "bf64ca91-29f2-4c59-9010-155bd92559aa" (UID: "bf64ca91-29f2-4c59-9010-155bd92559aa"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.950221 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 05:44:52 crc kubenswrapper[4871]: E1126 05:44:52.950865 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eca7326-5c90-4ff3-9ccb-879d01ed3e22" containerName="cinder-api" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.950950 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eca7326-5c90-4ff3-9ccb-879d01ed3e22" containerName="cinder-api" Nov 26 05:44:52 crc kubenswrapper[4871]: E1126 05:44:52.951037 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1eca7326-5c90-4ff3-9ccb-879d01ed3e22" containerName="cinder-api-log" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.951091 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="1eca7326-5c90-4ff3-9ccb-879d01ed3e22" containerName="cinder-api-log" Nov 26 05:44:52 crc kubenswrapper[4871]: E1126 05:44:52.951148 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api-log" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.951199 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api-log" Nov 26 05:44:52 crc kubenswrapper[4871]: E1126 05:44:52.951276 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.951333 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api" Nov 26 05:44:52 crc kubenswrapper[4871]: E1126 05:44:52.951388 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf64ca91-29f2-4c59-9010-155bd92559aa" containerName="glance-log" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.951446 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf64ca91-29f2-4c59-9010-155bd92559aa" containerName="glance-log" Nov 26 05:44:52 crc kubenswrapper[4871]: E1126 05:44:52.951508 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="196ce4b4-28af-4295-b20e-4d1cfa847b27" containerName="dnsmasq-dns" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.951574 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="196ce4b4-28af-4295-b20e-4d1cfa847b27" containerName="dnsmasq-dns" Nov 26 05:44:52 crc kubenswrapper[4871]: E1126 05:44:52.951634 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a392cdb-377e-4047-a1f4-f190429fe076" containerName="horizon" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.951700 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a392cdb-377e-4047-a1f4-f190429fe076" containerName="horizon" Nov 26 05:44:52 crc kubenswrapper[4871]: E1126 05:44:52.951845 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf64ca91-29f2-4c59-9010-155bd92559aa" containerName="glance-httpd" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.951897 4871 
state_mem.go:107] "Deleted CPUSet assignment" podUID="bf64ca91-29f2-4c59-9010-155bd92559aa" containerName="glance-httpd" Nov 26 05:44:52 crc kubenswrapper[4871]: E1126 05:44:52.951948 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="196ce4b4-28af-4295-b20e-4d1cfa847b27" containerName="init" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.952001 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="196ce4b4-28af-4295-b20e-4d1cfa847b27" containerName="init" Nov 26 05:44:52 crc kubenswrapper[4871]: E1126 05:44:52.952060 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc80c43e-82e4-44ae-a948-eb29150af2ea" containerName="init" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.952110 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc80c43e-82e4-44ae-a948-eb29150af2ea" containerName="init" Nov 26 05:44:52 crc kubenswrapper[4871]: E1126 05:44:52.952172 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a392cdb-377e-4047-a1f4-f190429fe076" containerName="horizon-log" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.952227 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a392cdb-377e-4047-a1f4-f190429fe076" containerName="horizon-log" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.952466 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a392cdb-377e-4047-a1f4-f190429fe076" containerName="horizon-log" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.953973 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api-log" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.954089 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="1eca7326-5c90-4ff3-9ccb-879d01ed3e22" containerName="cinder-api-log" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.954470 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf64ca91-29f2-4c59-9010-155bd92559aa" containerName="glance-log" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.953360 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-logs" (OuterVolumeSpecName: "logs") pod "bf64ca91-29f2-4c59-9010-155bd92559aa" (UID: "bf64ca91-29f2-4c59-9010-155bd92559aa"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.956407 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="1eca7326-5c90-4ff3-9ccb-879d01ed3e22" containerName="cinder-api" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.956505 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a392cdb-377e-4047-a1f4-f190429fe076" containerName="horizon" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.956625 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc80c43e-82e4-44ae-a948-eb29150af2ea" containerName="init" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.956704 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="196ce4b4-28af-4295-b20e-4d1cfa847b27" containerName="dnsmasq-dns" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.956766 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" containerName="barbican-api" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.956835 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf64ca91-29f2-4c59-9010-155bd92559aa" containerName="glance-httpd" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.958477 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.963755 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf64ca91-29f2-4c59-9010-155bd92559aa-kube-api-access-ll67v" (OuterVolumeSpecName: "kube-api-access-ll67v") pod "bf64ca91-29f2-4c59-9010-155bd92559aa" (UID: "bf64ca91-29f2-4c59-9010-155bd92559aa"). InnerVolumeSpecName "kube-api-access-ll67v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.964475 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.964660 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.964821 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.967665 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "bf64ca91-29f2-4c59-9010-155bd92559aa" (UID: "bf64ca91-29f2-4c59-9010-155bd92559aa"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.969662 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-scripts" (OuterVolumeSpecName: "scripts") pod "bf64ca91-29f2-4c59-9010-155bd92559aa" (UID: "bf64ca91-29f2-4c59-9010-155bd92559aa"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:52 crc kubenswrapper[4871]: I1126 05:44:52.970545 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.021799 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf64ca91-29f2-4c59-9010-155bd92559aa" (UID: "bf64ca91-29f2-4c59-9010-155bd92559aa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047358 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-config-data\") pod \"3a392cdb-377e-4047-a1f4-f190429fe076\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047430 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a392cdb-377e-4047-a1f4-f190429fe076-logs\") pod \"3a392cdb-377e-4047-a1f4-f190429fe076\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047485 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ggnph\" (UniqueName: \"kubernetes.io/projected/3a392cdb-377e-4047-a1f4-f190429fe076-kube-api-access-ggnph\") pod \"3a392cdb-377e-4047-a1f4-f190429fe076\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047513 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-svc\") pod \"196ce4b4-28af-4295-b20e-4d1cfa847b27\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047551 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-logs\") pod \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047590 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-nb\") pod \"196ce4b4-28af-4295-b20e-4d1cfa847b27\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047618 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data\") pod \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047637 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data-custom\") pod \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047688 4871 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-2b24x\" (UniqueName: \"kubernetes.io/projected/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-kube-api-access-2b24x\") pod \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047727 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-scripts\") pod \"3a392cdb-377e-4047-a1f4-f190429fe076\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047754 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-sb\") pod \"196ce4b4-28af-4295-b20e-4d1cfa847b27\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047791 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3a392cdb-377e-4047-a1f4-f190429fe076-horizon-secret-key\") pod \"3a392cdb-377e-4047-a1f4-f190429fe076\" (UID: \"3a392cdb-377e-4047-a1f4-f190429fe076\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047827 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-swift-storage-0\") pod \"196ce4b4-28af-4295-b20e-4d1cfa847b27\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047845 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-combined-ca-bundle\") pod \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\" (UID: \"39bbf335-d8f3-41f0-84e8-b9589ff1b60c\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047868 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cj7s8\" (UniqueName: \"kubernetes.io/projected/196ce4b4-28af-4295-b20e-4d1cfa847b27-kube-api-access-cj7s8\") pod \"196ce4b4-28af-4295-b20e-4d1cfa847b27\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.047885 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-config\") pod \"196ce4b4-28af-4295-b20e-4d1cfa847b27\" (UID: \"196ce4b4-28af-4295-b20e-4d1cfa847b27\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048097 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048127 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: 
I1126 05:44:53.048182 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bb2121e7-904c-4de4-a336-0ed681cd9be9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048218 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048244 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jj5dh\" (UniqueName: \"kubernetes.io/projected/bb2121e7-904c-4de4-a336-0ed681cd9be9-kube-api-access-jj5dh\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048276 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-config-data-custom\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048298 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-config-data\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048350 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb2121e7-904c-4de4-a336-0ed681cd9be9-logs\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048371 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-scripts\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048435 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048451 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ll67v\" (UniqueName: \"kubernetes.io/projected/bf64ca91-29f2-4c59-9010-155bd92559aa-kube-api-access-ll67v\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048462 4871 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bf64ca91-29f2-4c59-9010-155bd92559aa-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048473 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048482 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.048501 4871 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.052491 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a392cdb-377e-4047-a1f4-f190429fe076-logs" (OuterVolumeSpecName: "logs") pod "3a392cdb-377e-4047-a1f4-f190429fe076" (UID: "3a392cdb-377e-4047-a1f4-f190429fe076"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.054598 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-logs" (OuterVolumeSpecName: "logs") pod "39bbf335-d8f3-41f0-84e8-b9589ff1b60c" (UID: "39bbf335-d8f3-41f0-84e8-b9589ff1b60c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.055900 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a392cdb-377e-4047-a1f4-f190429fe076-kube-api-access-ggnph" (OuterVolumeSpecName: "kube-api-access-ggnph") pod "3a392cdb-377e-4047-a1f4-f190429fe076" (UID: "3a392cdb-377e-4047-a1f4-f190429fe076"). InnerVolumeSpecName "kube-api-access-ggnph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.069326 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-config-data" (OuterVolumeSpecName: "config-data") pod "bf64ca91-29f2-4c59-9010-155bd92559aa" (UID: "bf64ca91-29f2-4c59-9010-155bd92559aa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.075081 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-kube-api-access-2b24x" (OuterVolumeSpecName: "kube-api-access-2b24x") pod "39bbf335-d8f3-41f0-84e8-b9589ff1b60c" (UID: "39bbf335-d8f3-41f0-84e8-b9589ff1b60c"). InnerVolumeSpecName "kube-api-access-2b24x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.075392 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "39bbf335-d8f3-41f0-84e8-b9589ff1b60c" (UID: "39bbf335-d8f3-41f0-84e8-b9589ff1b60c"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.077223 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/196ce4b4-28af-4295-b20e-4d1cfa847b27-kube-api-access-cj7s8" (OuterVolumeSpecName: "kube-api-access-cj7s8") pod "196ce4b4-28af-4295-b20e-4d1cfa847b27" (UID: "196ce4b4-28af-4295-b20e-4d1cfa847b27"). InnerVolumeSpecName "kube-api-access-cj7s8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.078075 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a392cdb-377e-4047-a1f4-f190429fe076-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "3a392cdb-377e-4047-a1f4-f190429fe076" (UID: "3a392cdb-377e-4047-a1f4-f190429fe076"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.104857 4871 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.121197 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "196ce4b4-28af-4295-b20e-4d1cfa847b27" (UID: "196ce4b4-28af-4295-b20e-4d1cfa847b27"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.126612 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "39bbf335-d8f3-41f0-84e8-b9589ff1b60c" (UID: "39bbf335-d8f3-41f0-84e8-b9589ff1b60c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.144482 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-scripts" (OuterVolumeSpecName: "scripts") pod "3a392cdb-377e-4047-a1f4-f190429fe076" (UID: "3a392cdb-377e-4047-a1f4-f190429fe076"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.147433 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "196ce4b4-28af-4295-b20e-4d1cfa847b27" (UID: "196ce4b4-28af-4295-b20e-4d1cfa847b27"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.149649 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.149760 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.149857 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bb2121e7-904c-4de4-a336-0ed681cd9be9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.149944 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.150046 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jj5dh\" (UniqueName: \"kubernetes.io/projected/bb2121e7-904c-4de4-a336-0ed681cd9be9-kube-api-access-jj5dh\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.150131 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-config-data-custom\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.150204 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-config-data\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.150318 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb2121e7-904c-4de4-a336-0ed681cd9be9-logs\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.150822 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-scripts\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.150914 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bb2121e7-904c-4de4-a336-0ed681cd9be9-logs\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " 
pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.150341 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bb2121e7-904c-4de4-a336-0ed681cd9be9-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.151129 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a392cdb-377e-4047-a1f4-f190429fe076-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.151195 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ggnph\" (UniqueName: \"kubernetes.io/projected/3a392cdb-377e-4047-a1f4-f190429fe076-kube-api-access-ggnph\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.151251 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.151303 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.151360 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.151413 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf64ca91-29f2-4c59-9010-155bd92559aa-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.151466 4871 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.151518 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2b24x\" (UniqueName: \"kubernetes.io/projected/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-kube-api-access-2b24x\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.151590 4871 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.151643 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.151707 4871 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3a392cdb-377e-4047-a1f4-f190429fe076-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.152083 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 
crc kubenswrapper[4871]: I1126 05:44:53.152137 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cj7s8\" (UniqueName: \"kubernetes.io/projected/196ce4b4-28af-4295-b20e-4d1cfa847b27-kube-api-access-cj7s8\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.154663 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.156392 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-config" (OuterVolumeSpecName: "config") pod "196ce4b4-28af-4295-b20e-4d1cfa847b27" (UID: "196ce4b4-28af-4295-b20e-4d1cfa847b27"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.156673 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.157071 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-config-data\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.157386 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-scripts\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.158878 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-config-data-custom\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.158945 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bb2121e7-904c-4de4-a336-0ed681cd9be9-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.168300 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "196ce4b4-28af-4295-b20e-4d1cfa847b27" (UID: "196ce4b4-28af-4295-b20e-4d1cfa847b27"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.168765 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-config-data" (OuterVolumeSpecName: "config-data") pod "3a392cdb-377e-4047-a1f4-f190429fe076" (UID: "3a392cdb-377e-4047-a1f4-f190429fe076"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.172275 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jj5dh\" (UniqueName: \"kubernetes.io/projected/bb2121e7-904c-4de4-a336-0ed681cd9be9-kube-api-access-jj5dh\") pod \"cinder-api-0\" (UID: \"bb2121e7-904c-4de4-a336-0ed681cd9be9\") " pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.174061 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data" (OuterVolumeSpecName: "config-data") pod "39bbf335-d8f3-41f0-84e8-b9589ff1b60c" (UID: "39bbf335-d8f3-41f0-84e8-b9589ff1b60c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.191866 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "196ce4b4-28af-4295-b20e-4d1cfa847b27" (UID: "196ce4b4-28af-4295-b20e-4d1cfa847b27"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.253868 4871 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.253906 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.253917 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3a392cdb-377e-4047-a1f4-f190429fe076-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.253947 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/39bbf335-d8f3-41f0-84e8-b9589ff1b60c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.253957 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/196ce4b4-28af-4295-b20e-4d1cfa847b27-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.293686 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.328813 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.468214 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx8dc\" (UniqueName: \"kubernetes.io/projected/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-kube-api-access-vx8dc\") pod \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.468292 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-combined-ca-bundle\") pod \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.468317 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-httpd-config\") pod \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.468368 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-ovndb-tls-certs\") pod \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.475020 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-config\") pod \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\" (UID: \"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda\") " Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.495805 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-kube-api-access-vx8dc" (OuterVolumeSpecName: "kube-api-access-vx8dc") pod "1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" (UID: "1d07d92e-a5ac-479d-b6e3-2e175c5a6fda"). InnerVolumeSpecName "kube-api-access-vx8dc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.505195 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" (UID: "1d07d92e-a5ac-479d-b6e3-2e175c5a6fda"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.575761 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" (UID: "1d07d92e-a5ac-479d-b6e3-2e175c5a6fda"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.578920 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx8dc\" (UniqueName: \"kubernetes.io/projected/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-kube-api-access-vx8dc\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.578968 4871 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.578982 4871 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.582890 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" (UID: "1d07d92e-a5ac-479d-b6e3-2e175c5a6fda"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.600162 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-config" (OuterVolumeSpecName: "config") pod "1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" (UID: "1d07d92e-a5ac-479d-b6e3-2e175c5a6fda"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.616028 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.616101 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.616163 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.616961 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5865561ff4962bde5a4a448acaaef84f57651a9dc7c55ecf0253e295a67c98b1"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.617022 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://5865561ff4962bde5a4a448acaaef84f57651a9dc7c55ecf0253e295a67c98b1" gracePeriod=600 Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 
05:44:53.680626 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.680667 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.813220 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="5865561ff4962bde5a4a448acaaef84f57651a9dc7c55ecf0253e295a67c98b1" exitCode=0 Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.813310 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"5865561ff4962bde5a4a448acaaef84f57651a9dc7c55ecf0253e295a67c98b1"} Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.813346 4871 scope.go:117] "RemoveContainer" containerID="352b2b280740af55cbe8f36dbe220adf905af3370f34cf811c417077b6fe54f3" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.816833 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.820226 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b724414-8682-4e73-8b2d-305fce381613","Type":"ContainerStarted","Data":"c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f"} Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.820337 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="ceilometer-central-agent" containerID="cri-o://b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb" gracePeriod=30 Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.820393 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.820413 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="proxy-httpd" containerID="cri-o://c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f" gracePeriod=30 Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.820444 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="ceilometer-notification-agent" containerID="cri-o://9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794" gracePeriod=30 Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.820456 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="sg-core" containerID="cri-o://cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac" gracePeriod=30 Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.829163 4871 generic.go:334] "Generic (PLEG): container finished" podID="1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" containerID="6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1" exitCode=0 Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 
05:44:53.829235 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-69c5fdfb8b-95x78" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.829261 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69c5fdfb8b-95x78" event={"ID":"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda","Type":"ContainerDied","Data":"6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1"} Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.829707 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-69c5fdfb8b-95x78" event={"ID":"1d07d92e-a5ac-479d-b6e3-2e175c5a6fda","Type":"ContainerDied","Data":"f6c3d3ac236b7d899f6a7270442b6fb076af5b98120629a770ff845dd597dd59"} Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.844065 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.343392209 podStartE2EDuration="1m16.844047562s" podCreationTimestamp="2025-11-26 05:43:37 +0000 UTC" firstStartedPulling="2025-11-26 05:43:39.145701491 +0000 UTC m=+1077.328753067" lastFinishedPulling="2025-11-26 05:44:52.646356834 +0000 UTC m=+1150.829408420" observedRunningTime="2025-11-26 05:44:53.841316594 +0000 UTC m=+1152.024368180" watchObservedRunningTime="2025-11-26 05:44:53.844047562 +0000 UTC m=+1152.027099158" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.848803 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" event={"ID":"196ce4b4-28af-4295-b20e-4d1cfa847b27","Type":"ContainerDied","Data":"dad594202630ab8d8490f9f9340ba70bd5980a82244816242ad725613b307b8b"} Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.848892 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7648c6b969-hsbsf" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.855281 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67f98f67b9-25fgx" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.855861 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"48330a72-b80c-44bc-be03-c34038434964","Type":"ContainerStarted","Data":"edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d"} Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.856017 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="48330a72-b80c-44bc-be03-c34038434964" containerName="glance-log" containerID="cri-o://40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092" gracePeriod=30 Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.856249 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.856285 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="48330a72-b80c-44bc-be03-c34038434964" containerName="glance-httpd" containerID="cri-o://edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d" gracePeriod=30 Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.857165 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5995b75f66-tdlhq" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.933698 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=13.933676859 podStartE2EDuration="13.933676859s" podCreationTimestamp="2025-11-26 05:44:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:53.896427243 +0000 UTC m=+1152.079478839" watchObservedRunningTime="2025-11-26 05:44:53.933676859 +0000 UTC m=+1152.116728445" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.943350 4871 scope.go:117] "RemoveContainer" containerID="4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.946476 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-69c5fdfb8b-95x78"] Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.967698 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-69c5fdfb8b-95x78"] Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.979388 4871 scope.go:117] "RemoveContainer" containerID="6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1" Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.983774 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7648c6b969-hsbsf"] Nov 26 05:44:53 crc kubenswrapper[4871]: I1126 05:44:53.995583 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7648c6b969-hsbsf"] Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.027479 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-67f98f67b9-25fgx"] Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.046653 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-67f98f67b9-25fgx"] Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.069860 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.082208 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.086869 4871 scope.go:117] "RemoveContainer" containerID="4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749" Nov 26 05:44:54 crc kubenswrapper[4871]: E1126 05:44:54.087431 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749\": container with ID starting with 4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749 not found: ID does not exist" containerID="4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.087473 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749"} err="failed to get container status \"4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749\": rpc error: code = NotFound desc = could not find container \"4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749\": container with ID starting with 4fc8075f7ea2c2d8e90856b0d0dae6ae981048a1d2ec0c27926f0418a51e4749 not found: ID does not exist" Nov 26 05:44:54 
crc kubenswrapper[4871]: I1126 05:44:54.087497 4871 scope.go:117] "RemoveContainer" containerID="6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1" Nov 26 05:44:54 crc kubenswrapper[4871]: E1126 05:44:54.087986 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1\": container with ID starting with 6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1 not found: ID does not exist" containerID="6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.088006 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1"} err="failed to get container status \"6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1\": rpc error: code = NotFound desc = could not find container \"6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1\": container with ID starting with 6fa2f0a36e08b2b0a47a777f1517f4b1b54854205106229f6ac85b7fa7095be1 not found: ID does not exist" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.088019 4871 scope.go:117] "RemoveContainer" containerID="6a1be64c982f17ed763f56f7483ff8f43ad84ec11a54f1b3259d818e95bd917b" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.101944 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5995b75f66-tdlhq"] Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.112415 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:44:54 crc kubenswrapper[4871]: E1126 05:44:54.112840 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" containerName="neutron-api" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.112858 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" containerName="neutron-api" Nov 26 05:44:54 crc kubenswrapper[4871]: E1126 05:44:54.112879 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" containerName="neutron-httpd" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.112885 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" containerName="neutron-httpd" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.113066 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" containerName="neutron-httpd" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.113098 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" containerName="neutron-api" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.114303 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.116865 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.116986 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.133131 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5995b75f66-tdlhq"] Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.142322 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.150987 4871 scope.go:117] "RemoveContainer" containerID="11e4db97f8d04172acb4cb8165ff42f9de1bd3ca643bed403f6ecd7b4a7362b5" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.308083 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.308335 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.308448 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-logs\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.308483 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.308584 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-scripts\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.308647 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-config-data\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.308753 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.308883 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6w2mt\" (UniqueName: \"kubernetes.io/projected/e27715b3-349a-4da9-806b-bac09bc34086-kube-api-access-6w2mt\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.413509 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-logs\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.413599 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.413628 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-scripts\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.413645 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-config-data\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.413684 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6w2mt\" (UniqueName: \"kubernetes.io/projected/e27715b3-349a-4da9-806b-bac09bc34086-kube-api-access-6w2mt\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.413705 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.413760 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.413786 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-combined-ca-bundle\") pod 
\"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.414039 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-logs\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.414644 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.415850 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.420731 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-config-data\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.422411 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.422490 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-scripts\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.423121 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.435611 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6w2mt\" (UniqueName: \"kubernetes.io/projected/e27715b3-349a-4da9-806b-bac09bc34086-kube-api-access-6w2mt\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.453017 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") " pod="openstack/glance-default-external-api-0" Nov 26 
05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.533358 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="196ce4b4-28af-4295-b20e-4d1cfa847b27" path="/var/lib/kubelet/pods/196ce4b4-28af-4295-b20e-4d1cfa847b27/volumes"
Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.534643 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d07d92e-a5ac-479d-b6e3-2e175c5a6fda" path="/var/lib/kubelet/pods/1d07d92e-a5ac-479d-b6e3-2e175c5a6fda/volumes"
Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.535590 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1eca7326-5c90-4ff3-9ccb-879d01ed3e22" path="/var/lib/kubelet/pods/1eca7326-5c90-4ff3-9ccb-879d01ed3e22/volumes"
Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.537079 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39bbf335-d8f3-41f0-84e8-b9589ff1b60c" path="/var/lib/kubelet/pods/39bbf335-d8f3-41f0-84e8-b9589ff1b60c/volumes"
Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.537947 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a392cdb-377e-4047-a1f4-f190429fe076" path="/var/lib/kubelet/pods/3a392cdb-377e-4047-a1f4-f190429fe076/volumes"
Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.539516 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf64ca91-29f2-4c59-9010-155bd92559aa" path="/var/lib/kubelet/pods/bf64ca91-29f2-4c59-9010-155bd92559aa/volumes"
Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.661842 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.742067 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.823993 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-logs\") pod \"48330a72-b80c-44bc-be03-c34038434964\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.824355 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-scripts\") pod \"48330a72-b80c-44bc-be03-c34038434964\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.824386 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"48330a72-b80c-44bc-be03-c34038434964\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.824488 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-config-data\") pod \"48330a72-b80c-44bc-be03-c34038434964\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.824699 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-httpd-run\") pod \"48330a72-b80c-44bc-be03-c34038434964\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.824949 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-logs" (OuterVolumeSpecName: "logs") pod "48330a72-b80c-44bc-be03-c34038434964" (UID: "48330a72-b80c-44bc-be03-c34038434964"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.825139 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7tq2\" (UniqueName: \"kubernetes.io/projected/48330a72-b80c-44bc-be03-c34038434964-kube-api-access-w7tq2\") pod \"48330a72-b80c-44bc-be03-c34038434964\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.825179 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-combined-ca-bundle\") pod \"48330a72-b80c-44bc-be03-c34038434964\" (UID: \"48330a72-b80c-44bc-be03-c34038434964\") " Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.825299 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "48330a72-b80c-44bc-be03-c34038434964" (UID: "48330a72-b80c-44bc-be03-c34038434964"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.825833 4871 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.825859 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48330a72-b80c-44bc-be03-c34038434964-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.828305 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-scripts" (OuterVolumeSpecName: "scripts") pod "48330a72-b80c-44bc-be03-c34038434964" (UID: "48330a72-b80c-44bc-be03-c34038434964"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.828745 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "48330a72-b80c-44bc-be03-c34038434964" (UID: "48330a72-b80c-44bc-be03-c34038434964"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.829668 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48330a72-b80c-44bc-be03-c34038434964-kube-api-access-w7tq2" (OuterVolumeSpecName: "kube-api-access-w7tq2") pod "48330a72-b80c-44bc-be03-c34038434964" (UID: "48330a72-b80c-44bc-be03-c34038434964"). InnerVolumeSpecName "kube-api-access-w7tq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.880390 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.882982 4871 generic.go:334] "Generic (PLEG): container finished" podID="9b724414-8682-4e73-8b2d-305fce381613" containerID="c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f" exitCode=0 Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.883060 4871 generic.go:334] "Generic (PLEG): container finished" podID="9b724414-8682-4e73-8b2d-305fce381613" containerID="cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac" exitCode=2 Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.883070 4871 generic.go:334] "Generic (PLEG): container finished" podID="9b724414-8682-4e73-8b2d-305fce381613" containerID="b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb" exitCode=0 Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.883129 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b724414-8682-4e73-8b2d-305fce381613","Type":"ContainerDied","Data":"c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f"} Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.883189 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b724414-8682-4e73-8b2d-305fce381613","Type":"ContainerDied","Data":"cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac"} Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.883198 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b724414-8682-4e73-8b2d-305fce381613","Type":"ContainerDied","Data":"b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb"} Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.885092 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "48330a72-b80c-44bc-be03-c34038434964" (UID: "48330a72-b80c-44bc-be03-c34038434964"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.899342 4871 generic.go:334] "Generic (PLEG): container finished" podID="bfcbd2ac-54bd-46ea-8bed-81093bee3d65" containerID="45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df" exitCode=0 Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.899638 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.899753 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bfcbd2ac-54bd-46ea-8bed-81093bee3d65","Type":"ContainerDied","Data":"45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df"} Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.899788 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"bfcbd2ac-54bd-46ea-8bed-81093bee3d65","Type":"ContainerDied","Data":"573f16342110d11521b725fc9fd0d4d838ea3bd24dc0cef0214aedffa7a4494e"} Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.899804 4871 scope.go:117] "RemoveContainer" containerID="6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.902820 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-config-data" (OuterVolumeSpecName: "config-data") pod "48330a72-b80c-44bc-be03-c34038434964" (UID: "48330a72-b80c-44bc-be03-c34038434964"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.922928 4871 generic.go:334] "Generic (PLEG): container finished" podID="48330a72-b80c-44bc-be03-c34038434964" containerID="edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d" exitCode=0 Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.922978 4871 generic.go:334] "Generic (PLEG): container finished" podID="48330a72-b80c-44bc-be03-c34038434964" containerID="40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092" exitCode=143 Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.923121 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.924151 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"48330a72-b80c-44bc-be03-c34038434964","Type":"ContainerDied","Data":"edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d"} Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.924183 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"48330a72-b80c-44bc-be03-c34038434964","Type":"ContainerDied","Data":"40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092"} Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.924194 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"48330a72-b80c-44bc-be03-c34038434964","Type":"ContainerDied","Data":"483e8ad9c97500671ed4ec2f255c4c996946e8fc62ddd97ef9973ffcbf95a10e"} Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.927020 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.927047 4871 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.927059 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.927069 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7tq2\" (UniqueName: \"kubernetes.io/projected/48330a72-b80c-44bc-be03-c34038434964-kube-api-access-w7tq2\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.927078 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48330a72-b80c-44bc-be03-c34038434964-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.927983 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"37292f3b6ef7c2c0c15724c5c3a632dff71152a03a81708ad9d2ed933a0a1b15"} Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.934897 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bb2121e7-904c-4de4-a336-0ed681cd9be9","Type":"ContainerStarted","Data":"86e636bce4c97028d21b6deeb180be690ddaef55bd9fd5807f3e43c9ef580d29"} Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.934945 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bb2121e7-904c-4de4-a336-0ed681cd9be9","Type":"ContainerStarted","Data":"1c68071a30cd3e430da9c1ac142fafa94e172c728df52552adf49c9c7d340286"} Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.965938 4871 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 
05:44:54.980556 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.992615 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 05:44:54 crc kubenswrapper[4871]: I1126 05:44:54.995292 4871 scope.go:117] "RemoveContainer" containerID="45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df"
Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.001914 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 05:44:55 crc kubenswrapper[4871]: E1126 05:44:55.002402 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48330a72-b80c-44bc-be03-c34038434964" containerName="glance-log"
Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.002417 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="48330a72-b80c-44bc-be03-c34038434964" containerName="glance-log"
Nov 26 05:44:55 crc kubenswrapper[4871]: E1126 05:44:55.002431 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48330a72-b80c-44bc-be03-c34038434964" containerName="glance-httpd"
Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.002436 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="48330a72-b80c-44bc-be03-c34038434964" containerName="glance-httpd"
Nov 26 05:44:55 crc kubenswrapper[4871]: E1126 05:44:55.002465 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfcbd2ac-54bd-46ea-8bed-81093bee3d65" containerName="cinder-scheduler"
Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.002472 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfcbd2ac-54bd-46ea-8bed-81093bee3d65" containerName="cinder-scheduler"
Nov 26 05:44:55 crc kubenswrapper[4871]: E1126 05:44:55.002498 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfcbd2ac-54bd-46ea-8bed-81093bee3d65" containerName="probe"
Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.002504 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfcbd2ac-54bd-46ea-8bed-81093bee3d65" containerName="probe"
Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.002760 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfcbd2ac-54bd-46ea-8bed-81093bee3d65" containerName="probe"
Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.002786 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="48330a72-b80c-44bc-be03-c34038434964" containerName="glance-httpd"
Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.002804 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfcbd2ac-54bd-46ea-8bed-81093bee3d65" containerName="cinder-scheduler"
Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.002816 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="48330a72-b80c-44bc-be03-c34038434964" containerName="glance-log"
Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.005152 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.006188 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.009044 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.009332 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.037362 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data-custom\") pod \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.037402 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-etc-machine-id\") pod \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.037605 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data\") pod \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.037648 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqfcr\" (UniqueName: \"kubernetes.io/projected/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-kube-api-access-gqfcr\") pod \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.037670 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-combined-ca-bundle\") pod \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.037725 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-scripts\") pod \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\" (UID: \"bfcbd2ac-54bd-46ea-8bed-81093bee3d65\") " Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.042826 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "bfcbd2ac-54bd-46ea-8bed-81093bee3d65" (UID: "bfcbd2ac-54bd-46ea-8bed-81093bee3d65"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.045881 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "bfcbd2ac-54bd-46ea-8bed-81093bee3d65" (UID: "bfcbd2ac-54bd-46ea-8bed-81093bee3d65"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.046666 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-kube-api-access-gqfcr" (OuterVolumeSpecName: "kube-api-access-gqfcr") pod "bfcbd2ac-54bd-46ea-8bed-81093bee3d65" (UID: "bfcbd2ac-54bd-46ea-8bed-81093bee3d65"). InnerVolumeSpecName "kube-api-access-gqfcr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.054071 4871 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.059667 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-scripts" (OuterVolumeSpecName: "scripts") pod "bfcbd2ac-54bd-46ea-8bed-81093bee3d65" (UID: "bfcbd2ac-54bd-46ea-8bed-81093bee3d65"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.079500 4871 scope.go:117] "RemoveContainer" containerID="6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73" Nov 26 05:44:55 crc kubenswrapper[4871]: E1126 05:44:55.079982 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73\": container with ID starting with 6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73 not found: ID does not exist" containerID="6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.080040 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73"} err="failed to get container status \"6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73\": rpc error: code = NotFound desc = could not find container \"6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73\": container with ID starting with 6e08f547dee63f19308ab526809c8a9a451e5a5307d903add01e274d89904b73 not found: ID does not exist" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.080071 4871 scope.go:117] "RemoveContainer" containerID="45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df" Nov 26 05:44:55 crc kubenswrapper[4871]: E1126 05:44:55.080685 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df\": container with ID starting with 45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df not found: ID does not exist" containerID="45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.080713 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df"} err="failed to get container status \"45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df\": rpc error: code = NotFound desc = could not find container \"45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df\": container with ID starting with 
45e2f38f04bfadc63926bda979815e00cfc7dc1296de8186f8a8c9c8ea2b72df not found: ID does not exist" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.080729 4871 scope.go:117] "RemoveContainer" containerID="edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.125636 4871 scope.go:117] "RemoveContainer" containerID="40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.129032 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bfcbd2ac-54bd-46ea-8bed-81093bee3d65" (UID: "bfcbd2ac-54bd-46ea-8bed-81093bee3d65"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.145790 4871 scope.go:117] "RemoveContainer" containerID="edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d" Nov 26 05:44:55 crc kubenswrapper[4871]: E1126 05:44:55.146421 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d\": container with ID starting with edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d not found: ID does not exist" containerID="edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.146468 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d"} err="failed to get container status \"edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d\": rpc error: code = NotFound desc = could not find container \"edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d\": container with ID starting with edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d not found: ID does not exist" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.146493 4871 scope.go:117] "RemoveContainer" containerID="40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092" Nov 26 05:44:55 crc kubenswrapper[4871]: E1126 05:44:55.147625 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092\": container with ID starting with 40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092 not found: ID does not exist" containerID="40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.147681 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092"} err="failed to get container status \"40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092\": rpc error: code = NotFound desc = could not find container \"40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092\": container with ID starting with 40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092 not found: ID does not exist" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.147697 4871 scope.go:117] "RemoveContainer" containerID="edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d" 
Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.147949 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d"} err="failed to get container status \"edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d\": rpc error: code = NotFound desc = could not find container \"edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d\": container with ID starting with edbbc7c95323677dda84b6c0b9b8438e3b0c84065d048e8f563313abcb37860d not found: ID does not exist" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.148004 4871 scope.go:117] "RemoveContainer" containerID="40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.150075 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092"} err="failed to get container status \"40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092\": rpc error: code = NotFound desc = could not find container \"40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092\": container with ID starting with 40992af910c532fd9ffddeb174d7b08a45eeb61f07d3994726ed691c3a790092 not found: ID does not exist" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155055 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155092 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155144 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155164 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155215 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-logs\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155241 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-ztspb\" (UniqueName: \"kubernetes.io/projected/3c03afa4-2257-4e38-b59a-04cdcc8060e4-kube-api-access-ztspb\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155351 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155391 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155444 4871 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155456 4871 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155467 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqfcr\" (UniqueName: \"kubernetes.io/projected/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-kube-api-access-gqfcr\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155477 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.155487 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.186927 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data" (OuterVolumeSpecName: "config-data") pod "bfcbd2ac-54bd-46ea-8bed-81093bee3d65" (UID: "bfcbd2ac-54bd-46ea-8bed-81093bee3d65"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.237377 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.246136 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.256880 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.256962 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.256999 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.257017 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.257090 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.257111 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.257174 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-logs\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.257207 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztspb\" (UniqueName: \"kubernetes.io/projected/3c03afa4-2257-4e38-b59a-04cdcc8060e4-kube-api-access-ztspb\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.257266 4871 reconciler_common.go:293] "Volume detached for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/bfcbd2ac-54bd-46ea-8bed-81093bee3d65-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.257933 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.265357 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.267011 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-logs\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.280676 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.281369 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.292075 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.292554 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztspb\" (UniqueName: \"kubernetes.io/projected/3c03afa4-2257-4e38-b59a-04cdcc8060e4-kube-api-access-ztspb\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.310674 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.313260 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.317004 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.322769 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.330260 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.353267 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.368744 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-config-data\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.369132 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.369295 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-scripts\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.369325 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/adddc22d-b976-4931-8dde-359f0952b438-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.369361 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.369456 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-827cw\" (UniqueName: \"kubernetes.io/projected/adddc22d-b976-4931-8dde-359f0952b438-kube-api-access-827cw\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.415228 4871 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:44:55 crc kubenswrapper[4871]: W1126 05:44:55.422534 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode27715b3_349a_4da9_806b_bac09bc34086.slice/crio-04cef573aea9df6791ee2358661bf6148dcf256931f96704ad03c6814bd62332 WatchSource:0}: Error finding container 04cef573aea9df6791ee2358661bf6148dcf256931f96704ad03c6814bd62332: Status 404 returned error can't find the container with id 04cef573aea9df6791ee2358661bf6148dcf256931f96704ad03c6814bd62332 Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.474898 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.475191 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-scripts\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.475292 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/adddc22d-b976-4931-8dde-359f0952b438-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.475417 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.475560 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-827cw\" (UniqueName: \"kubernetes.io/projected/adddc22d-b976-4931-8dde-359f0952b438-kube-api-access-827cw\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.475733 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-config-data\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.476632 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/adddc22d-b976-4931-8dde-359f0952b438-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.482112 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 
05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.482885 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-scripts\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.483394 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-config-data\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.484717 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/adddc22d-b976-4931-8dde-359f0952b438-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.514726 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-827cw\" (UniqueName: \"kubernetes.io/projected/adddc22d-b976-4931-8dde-359f0952b438-kube-api-access-827cw\") pod \"cinder-scheduler-0\" (UID: \"adddc22d-b976-4931-8dde-359f0952b438\") " pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.639827 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.660946 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.950653 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bb2121e7-904c-4de4-a336-0ed681cd9be9","Type":"ContainerStarted","Data":"e100ff23efe4179d3750e4d3d868a6b9d8ed81f9a1a0c187ce6ec08616a622bb"} Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.951376 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 05:44:55 crc kubenswrapper[4871]: I1126 05:44:55.978920 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e27715b3-349a-4da9-806b-bac09bc34086","Type":"ContainerStarted","Data":"04cef573aea9df6791ee2358661bf6148dcf256931f96704ad03c6814bd62332"} Nov 26 05:44:56 crc kubenswrapper[4871]: I1126 05:44:56.238443 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.238416934 podStartE2EDuration="4.238416934s" podCreationTimestamp="2025-11-26 05:44:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:55.977262105 +0000 UTC m=+1154.160313691" watchObservedRunningTime="2025-11-26 05:44:56.238416934 +0000 UTC m=+1154.421468530" Nov 26 05:44:56 crc kubenswrapper[4871]: I1126 05:44:56.246355 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 05:44:56 crc kubenswrapper[4871]: I1126 05:44:56.299070 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 05:44:56 crc kubenswrapper[4871]: W1126 05:44:56.334002 4871 manager.go:1169] Failed to process watch event 
{EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podadddc22d_b976_4931_8dde_359f0952b438.slice/crio-7d0d386c5f5689970037fb27daa17f750db3e092afd08447a8c788b7b9f0890c WatchSource:0}: Error finding container 7d0d386c5f5689970037fb27daa17f750db3e092afd08447a8c788b7b9f0890c: Status 404 returned error can't find the container with id 7d0d386c5f5689970037fb27daa17f750db3e092afd08447a8c788b7b9f0890c Nov 26 05:44:56 crc kubenswrapper[4871]: I1126 05:44:56.530322 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48330a72-b80c-44bc-be03-c34038434964" path="/var/lib/kubelet/pods/48330a72-b80c-44bc-be03-c34038434964/volumes" Nov 26 05:44:56 crc kubenswrapper[4871]: I1126 05:44:56.531397 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfcbd2ac-54bd-46ea-8bed-81093bee3d65" path="/var/lib/kubelet/pods/bfcbd2ac-54bd-46ea-8bed-81093bee3d65/volumes" Nov 26 05:44:56 crc kubenswrapper[4871]: I1126 05:44:56.971136 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8665945b44-wbcwv" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.163:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.163:8443: connect: connection refused" Nov 26 05:44:57 crc kubenswrapper[4871]: I1126 05:44:57.035313 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"adddc22d-b976-4931-8dde-359f0952b438","Type":"ContainerStarted","Data":"f744fed45cff42e0147c521bb7056ee6b2a8d9ca3077bd9d7809cd1e4268ad7b"} Nov 26 05:44:57 crc kubenswrapper[4871]: I1126 05:44:57.035372 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"adddc22d-b976-4931-8dde-359f0952b438","Type":"ContainerStarted","Data":"7d0d386c5f5689970037fb27daa17f750db3e092afd08447a8c788b7b9f0890c"} Nov 26 05:44:57 crc kubenswrapper[4871]: I1126 05:44:57.040780 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e27715b3-349a-4da9-806b-bac09bc34086","Type":"ContainerStarted","Data":"05deadaf9cf068234e04038e52495b9c032ea35da37d52e6e0f831d3200902bf"} Nov 26 05:44:57 crc kubenswrapper[4871]: I1126 05:44:57.040830 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e27715b3-349a-4da9-806b-bac09bc34086","Type":"ContainerStarted","Data":"83be37e4ac8fbf2ead804e3b93191a3c997ef0f5ccc9646ddd8581467b4a51ea"} Nov 26 05:44:57 crc kubenswrapper[4871]: I1126 05:44:57.047449 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3c03afa4-2257-4e38-b59a-04cdcc8060e4","Type":"ContainerStarted","Data":"a37f7c1dd217788f012692caa6dcf8d8413cd3b61d4cf3cf6c0084a745500919"} Nov 26 05:44:57 crc kubenswrapper[4871]: I1126 05:44:57.047493 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3c03afa4-2257-4e38-b59a-04cdcc8060e4","Type":"ContainerStarted","Data":"eaa69e56dc3d992bdc00253a48e26aebda54c4128e6e522089cb7af3e4549700"} Nov 26 05:44:57 crc kubenswrapper[4871]: I1126 05:44:57.507169 4871 scope.go:117] "RemoveContainer" containerID="9e2d82019cfae3a801741429cd76d41e71f620675230c40d3af434b0678c5b24" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.059958 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"adddc22d-b976-4931-8dde-359f0952b438","Type":"ContainerStarted","Data":"2a6b7cab8a6afbc8ecda802f0fdc4290f1152cb8132fd87d214870d2d726e314"} Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.061784 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"906807e1-f724-4ab4-9ccc-95656188890e","Type":"ContainerStarted","Data":"c0dee9dbf5b0d8070947de7e352e4d9d380476356ecdfa075feffa0ed0cfbdac"} Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.065937 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3c03afa4-2257-4e38-b59a-04cdcc8060e4","Type":"ContainerStarted","Data":"0ecb79534560d8f2f10ebe835141ff761444eaff9dc3f528737ed699b8494ed2"} Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.081974 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.081951729 podStartE2EDuration="3.081951729s" podCreationTimestamp="2025-11-26 05:44:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:58.078803561 +0000 UTC m=+1156.261855187" watchObservedRunningTime="2025-11-26 05:44:58.081951729 +0000 UTC m=+1156.265003395" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.091840 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.091774213 podStartE2EDuration="5.091774213s" podCreationTimestamp="2025-11-26 05:44:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:57.068810886 +0000 UTC m=+1155.251862492" watchObservedRunningTime="2025-11-26 05:44:58.091774213 +0000 UTC m=+1156.274825849" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.108723 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.108697584 podStartE2EDuration="4.108697584s" podCreationTimestamp="2025-11-26 05:44:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:44:58.101868464 +0000 UTC m=+1156.284920040" watchObservedRunningTime="2025-11-26 05:44:58.108697584 +0000 UTC m=+1156.291749180" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.750187 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.842725 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-log-httpd\") pod \"9b724414-8682-4e73-8b2d-305fce381613\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.842795 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-sg-core-conf-yaml\") pod \"9b724414-8682-4e73-8b2d-305fce381613\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.842857 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-config-data\") pod \"9b724414-8682-4e73-8b2d-305fce381613\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.843076 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-combined-ca-bundle\") pod \"9b724414-8682-4e73-8b2d-305fce381613\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.843107 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-scripts\") pod \"9b724414-8682-4e73-8b2d-305fce381613\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.843165 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-run-httpd\") pod \"9b724414-8682-4e73-8b2d-305fce381613\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.843191 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjzj7\" (UniqueName: \"kubernetes.io/projected/9b724414-8682-4e73-8b2d-305fce381613-kube-api-access-sjzj7\") pod \"9b724414-8682-4e73-8b2d-305fce381613\" (UID: \"9b724414-8682-4e73-8b2d-305fce381613\") " Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.843800 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9b724414-8682-4e73-8b2d-305fce381613" (UID: "9b724414-8682-4e73-8b2d-305fce381613"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.844203 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9b724414-8682-4e73-8b2d-305fce381613" (UID: "9b724414-8682-4e73-8b2d-305fce381613"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.849828 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b724414-8682-4e73-8b2d-305fce381613-kube-api-access-sjzj7" (OuterVolumeSpecName: "kube-api-access-sjzj7") pod "9b724414-8682-4e73-8b2d-305fce381613" (UID: "9b724414-8682-4e73-8b2d-305fce381613"). InnerVolumeSpecName "kube-api-access-sjzj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.865728 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-scripts" (OuterVolumeSpecName: "scripts") pod "9b724414-8682-4e73-8b2d-305fce381613" (UID: "9b724414-8682-4e73-8b2d-305fce381613"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.877609 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9b724414-8682-4e73-8b2d-305fce381613" (UID: "9b724414-8682-4e73-8b2d-305fce381613"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.945251 4871 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.945292 4871 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.945307 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.945318 4871 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b724414-8682-4e73-8b2d-305fce381613-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.945328 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjzj7\" (UniqueName: \"kubernetes.io/projected/9b724414-8682-4e73-8b2d-305fce381613-kube-api-access-sjzj7\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.949164 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b724414-8682-4e73-8b2d-305fce381613" (UID: "9b724414-8682-4e73-8b2d-305fce381613"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:58 crc kubenswrapper[4871]: I1126 05:44:58.986609 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-config-data" (OuterVolumeSpecName: "config-data") pod "9b724414-8682-4e73-8b2d-305fce381613" (UID: "9b724414-8682-4e73-8b2d-305fce381613"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.047481 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.047515 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b724414-8682-4e73-8b2d-305fce381613-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.077316 4871 generic.go:334] "Generic (PLEG): container finished" podID="9b724414-8682-4e73-8b2d-305fce381613" containerID="9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794" exitCode=0 Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.078281 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.088014 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b724414-8682-4e73-8b2d-305fce381613","Type":"ContainerDied","Data":"9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794"} Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.088331 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b724414-8682-4e73-8b2d-305fce381613","Type":"ContainerDied","Data":"eb4b57b6a55dc1bcf36abda5f3da18db0b640113bc05ecf387d379d5733a9fef"} Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.088356 4871 scope.go:117] "RemoveContainer" containerID="c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.114103 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.134714 4871 scope.go:117] "RemoveContainer" containerID="cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.142905 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.165328 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:44:59 crc kubenswrapper[4871]: E1126 05:44:59.165868 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="ceilometer-notification-agent" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.165892 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="ceilometer-notification-agent" Nov 26 05:44:59 crc kubenswrapper[4871]: E1126 05:44:59.165929 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="sg-core" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.165939 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="sg-core" Nov 26 05:44:59 crc kubenswrapper[4871]: E1126 05:44:59.165958 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="proxy-httpd" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.165966 4871 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="proxy-httpd" Nov 26 05:44:59 crc kubenswrapper[4871]: E1126 05:44:59.165984 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="ceilometer-central-agent" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.165994 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="ceilometer-central-agent" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.166212 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="ceilometer-notification-agent" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.166229 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="proxy-httpd" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.166247 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="sg-core" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.166263 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b724414-8682-4e73-8b2d-305fce381613" containerName="ceilometer-central-agent" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.168613 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.171225 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.175021 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.176726 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.181445 4871 scope.go:117] "RemoveContainer" containerID="9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.222852 4871 scope.go:117] "RemoveContainer" containerID="b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.249029 4871 scope.go:117] "RemoveContainer" containerID="c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f" Nov 26 05:44:59 crc kubenswrapper[4871]: E1126 05:44:59.249444 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f\": container with ID starting with c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f not found: ID does not exist" containerID="c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.249495 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f"} err="failed to get container status \"c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f\": rpc error: code = NotFound desc = could not find container \"c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f\": container with ID starting with c4ae7d04f89c83f40d24079eb59cc37bea06daa2b09399a5acb003d216264e2f not found: ID does not exist" 
Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.249518 4871 scope.go:117] "RemoveContainer" containerID="cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac" Nov 26 05:44:59 crc kubenswrapper[4871]: E1126 05:44:59.249819 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac\": container with ID starting with cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac not found: ID does not exist" containerID="cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.249836 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac"} err="failed to get container status \"cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac\": rpc error: code = NotFound desc = could not find container \"cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac\": container with ID starting with cbe54d9697268692a87e8638c65d42920b895611c17f85cef1bc6670f93edeac not found: ID does not exist" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.249850 4871 scope.go:117] "RemoveContainer" containerID="9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794" Nov 26 05:44:59 crc kubenswrapper[4871]: E1126 05:44:59.250070 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794\": container with ID starting with 9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794 not found: ID does not exist" containerID="9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.250092 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794"} err="failed to get container status \"9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794\": rpc error: code = NotFound desc = could not find container \"9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794\": container with ID starting with 9abd53467965e5620a4e437d0c8a83aa5ab6bc0029e0629f4ee0ba9fe83d1794 not found: ID does not exist" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.250108 4871 scope.go:117] "RemoveContainer" containerID="b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb" Nov 26 05:44:59 crc kubenswrapper[4871]: E1126 05:44:59.250333 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb\": container with ID starting with b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb not found: ID does not exist" containerID="b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.250349 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb"} err="failed to get container status \"b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb\": rpc error: code = NotFound desc = could not find container 
\"b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb\": container with ID starting with b7ca991c6e161910746bb8f6dd58276dd7ecdff6e5266dd6fb56dc67732661cb not found: ID does not exist" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.251176 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-config-data\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.251312 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-log-httpd\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.251483 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxlzv\" (UniqueName: \"kubernetes.io/projected/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-kube-api-access-cxlzv\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.251686 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.252304 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-run-httpd\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.253251 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.253434 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-scripts\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.355265 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.355698 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-scripts\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 
05:44:59.356877 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-config-data\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.356983 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-log-httpd\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.357099 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxlzv\" (UniqueName: \"kubernetes.io/projected/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-kube-api-access-cxlzv\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.357243 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.357397 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-run-httpd\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.357881 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-run-httpd\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.358506 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-log-httpd\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.361877 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.362374 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-scripts\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.362890 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-config-data\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.365341 4871 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.374575 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxlzv\" (UniqueName: \"kubernetes.io/projected/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-kube-api-access-cxlzv\") pod \"ceilometer-0\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") " pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.492585 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:44:59 crc kubenswrapper[4871]: I1126 05:44:59.807547 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-847fdf8fc-mswx4" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.014231 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.090275 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f","Type":"ContainerStarted","Data":"bb4548ebbb6f788ae347c83dcf4be12510c90d5fee4a6ac427edfc529540572e"} Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.151631 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq"] Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.154729 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.157738 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.158895 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.178880 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq"] Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.282302 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gmk7\" (UniqueName: \"kubernetes.io/projected/149882f2-ae3b-4571-a1ad-cbed765c2c77-kube-api-access-4gmk7\") pod \"collect-profiles-29402265-nhkhq\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.282498 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/149882f2-ae3b-4571-a1ad-cbed765c2c77-config-volume\") pod \"collect-profiles-29402265-nhkhq\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.282664 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/149882f2-ae3b-4571-a1ad-cbed765c2c77-secret-volume\") pod 
\"collect-profiles-29402265-nhkhq\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.384044 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/149882f2-ae3b-4571-a1ad-cbed765c2c77-config-volume\") pod \"collect-profiles-29402265-nhkhq\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.384116 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/149882f2-ae3b-4571-a1ad-cbed765c2c77-secret-volume\") pod \"collect-profiles-29402265-nhkhq\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.384199 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gmk7\" (UniqueName: \"kubernetes.io/projected/149882f2-ae3b-4571-a1ad-cbed765c2c77-kube-api-access-4gmk7\") pod \"collect-profiles-29402265-nhkhq\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.384980 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/149882f2-ae3b-4571-a1ad-cbed765c2c77-config-volume\") pod \"collect-profiles-29402265-nhkhq\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.390028 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/149882f2-ae3b-4571-a1ad-cbed765c2c77-secret-volume\") pod \"collect-profiles-29402265-nhkhq\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.402718 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gmk7\" (UniqueName: \"kubernetes.io/projected/149882f2-ae3b-4571-a1ad-cbed765c2c77-kube-api-access-4gmk7\") pod \"collect-profiles-29402265-nhkhq\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.524074 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b724414-8682-4e73-8b2d-305fce381613" path="/var/lib/kubelet/pods/9b724414-8682-4e73-8b2d-305fce381613/volumes" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.608603 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.631421 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.632896 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-665fcf99fb-m82r7" Nov 26 05:45:00 crc kubenswrapper[4871]: I1126 05:45:00.663041 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.116261 4871 generic.go:334] "Generic (PLEG): container finished" podID="906807e1-f724-4ab4-9ccc-95656188890e" containerID="c0dee9dbf5b0d8070947de7e352e4d9d380476356ecdfa075feffa0ed0cfbdac" exitCode=1 Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.116724 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"906807e1-f724-4ab4-9ccc-95656188890e","Type":"ContainerDied","Data":"c0dee9dbf5b0d8070947de7e352e4d9d380476356ecdfa075feffa0ed0cfbdac"} Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.116798 4871 scope.go:117] "RemoveContainer" containerID="9e2d82019cfae3a801741429cd76d41e71f620675230c40d3af434b0678c5b24" Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.117938 4871 scope.go:117] "RemoveContainer" containerID="c0dee9dbf5b0d8070947de7e352e4d9d380476356ecdfa075feffa0ed0cfbdac" Nov 26 05:45:01 crc kubenswrapper[4871]: E1126 05:45:01.118405 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(906807e1-f724-4ab4-9ccc-95656188890e)\"" pod="openstack/watcher-decision-engine-0" podUID="906807e1-f724-4ab4-9ccc-95656188890e" Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.131066 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f","Type":"ContainerStarted","Data":"3d775446a8a741f53e6ba789e776178b1fabfe0339b847f393d993e6dadec0c2"} Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.131124 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f","Type":"ContainerStarted","Data":"59273f91df935074b8f112bf9328da0e55926ca4ad0ec6d25f31b6dcfa2f5bde"} Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.171788 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq"] Nov 26 05:45:01 crc kubenswrapper[4871]: E1126 05:45:01.780187 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod149882f2_ae3b_4571_a1ad_cbed765c2c77.slice/crio-d053714db1665e915875858ece0e68e89ae5c5aea55a9824e7f07301df0b02dd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod149882f2_ae3b_4571_a1ad_cbed765c2c77.slice/crio-conmon-d053714db1665e915875858ece0e68e89ae5c5aea55a9824e7f07301df0b02dd.scope\": RecentStats: unable to find data in memory cache]" Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.930451 4871 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/openstackclient"] Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.936881 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.942413 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.942476 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.947560 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-mzrhf" Nov 26 05:45:01 crc kubenswrapper[4871]: I1126 05:45:01.959932 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.129503 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7218b9c9-2508-46eb-8942-4c22b0c706cf-openstack-config\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.129641 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7218b9c9-2508-46eb-8942-4c22b0c706cf-openstack-config-secret\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.129731 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nh44w\" (UniqueName: \"kubernetes.io/projected/7218b9c9-2508-46eb-8942-4c22b0c706cf-kube-api-access-nh44w\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.129889 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7218b9c9-2508-46eb-8942-4c22b0c706cf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.141173 4871 generic.go:334] "Generic (PLEG): container finished" podID="149882f2-ae3b-4571-a1ad-cbed765c2c77" containerID="d053714db1665e915875858ece0e68e89ae5c5aea55a9824e7f07301df0b02dd" exitCode=0 Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.141431 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" event={"ID":"149882f2-ae3b-4571-a1ad-cbed765c2c77","Type":"ContainerDied","Data":"d053714db1665e915875858ece0e68e89ae5c5aea55a9824e7f07301df0b02dd"} Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.141486 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" event={"ID":"149882f2-ae3b-4571-a1ad-cbed765c2c77","Type":"ContainerStarted","Data":"9a76c831945f2cbd3f87931d60a24eb9671a548cbc86644724c37f8e05d5ef33"} Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.142782 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f","Type":"ContainerStarted","Data":"6d8ce20e186718d12d8e71c6c6f4df3f788fad04930913940cf0b5585ab721cd"} Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.231642 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7218b9c9-2508-46eb-8942-4c22b0c706cf-openstack-config\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.231709 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7218b9c9-2508-46eb-8942-4c22b0c706cf-openstack-config-secret\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.231748 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nh44w\" (UniqueName: \"kubernetes.io/projected/7218b9c9-2508-46eb-8942-4c22b0c706cf-kube-api-access-nh44w\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.231863 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7218b9c9-2508-46eb-8942-4c22b0c706cf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.232924 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7218b9c9-2508-46eb-8942-4c22b0c706cf-openstack-config\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.238154 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7218b9c9-2508-46eb-8942-4c22b0c706cf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.243251 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7218b9c9-2508-46eb-8942-4c22b0c706cf-openstack-config-secret\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.250340 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nh44w\" (UniqueName: \"kubernetes.io/projected/7218b9c9-2508-46eb-8942-4c22b0c706cf-kube-api-access-nh44w\") pod \"openstackclient\" (UID: \"7218b9c9-2508-46eb-8942-4c22b0c706cf\") " pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.263834 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 05:45:02 crc kubenswrapper[4871]: I1126 05:45:02.726061 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 05:45:03 crc kubenswrapper[4871]: I1126 05:45:03.154386 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"7218b9c9-2508-46eb-8942-4c22b0c706cf","Type":"ContainerStarted","Data":"3deb4c2928253add231a96c7dc4f74ab35a82f22cb85a1ba12bf0c223495bdd8"} Nov 26 05:45:03 crc kubenswrapper[4871]: I1126 05:45:03.577408 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:03 crc kubenswrapper[4871]: I1126 05:45:03.762797 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/149882f2-ae3b-4571-a1ad-cbed765c2c77-config-volume\") pod \"149882f2-ae3b-4571-a1ad-cbed765c2c77\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " Nov 26 05:45:03 crc kubenswrapper[4871]: I1126 05:45:03.763246 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/149882f2-ae3b-4571-a1ad-cbed765c2c77-secret-volume\") pod \"149882f2-ae3b-4571-a1ad-cbed765c2c77\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " Nov 26 05:45:03 crc kubenswrapper[4871]: I1126 05:45:03.763277 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gmk7\" (UniqueName: \"kubernetes.io/projected/149882f2-ae3b-4571-a1ad-cbed765c2c77-kube-api-access-4gmk7\") pod \"149882f2-ae3b-4571-a1ad-cbed765c2c77\" (UID: \"149882f2-ae3b-4571-a1ad-cbed765c2c77\") " Nov 26 05:45:03 crc kubenswrapper[4871]: I1126 05:45:03.763477 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/149882f2-ae3b-4571-a1ad-cbed765c2c77-config-volume" (OuterVolumeSpecName: "config-volume") pod "149882f2-ae3b-4571-a1ad-cbed765c2c77" (UID: "149882f2-ae3b-4571-a1ad-cbed765c2c77"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:45:03 crc kubenswrapper[4871]: I1126 05:45:03.763987 4871 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/149882f2-ae3b-4571-a1ad-cbed765c2c77-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:03 crc kubenswrapper[4871]: I1126 05:45:03.770549 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/149882f2-ae3b-4571-a1ad-cbed765c2c77-kube-api-access-4gmk7" (OuterVolumeSpecName: "kube-api-access-4gmk7") pod "149882f2-ae3b-4571-a1ad-cbed765c2c77" (UID: "149882f2-ae3b-4571-a1ad-cbed765c2c77"). InnerVolumeSpecName "kube-api-access-4gmk7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:45:03 crc kubenswrapper[4871]: I1126 05:45:03.782674 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/149882f2-ae3b-4571-a1ad-cbed765c2c77-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "149882f2-ae3b-4571-a1ad-cbed765c2c77" (UID: "149882f2-ae3b-4571-a1ad-cbed765c2c77"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:45:03 crc kubenswrapper[4871]: I1126 05:45:03.865212 4871 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/149882f2-ae3b-4571-a1ad-cbed765c2c77-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:03 crc kubenswrapper[4871]: I1126 05:45:03.865253 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4gmk7\" (UniqueName: \"kubernetes.io/projected/149882f2-ae3b-4571-a1ad-cbed765c2c77-kube-api-access-4gmk7\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.152982 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.153037 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.153762 4871 scope.go:117] "RemoveContainer" containerID="c0dee9dbf5b0d8070947de7e352e4d9d380476356ecdfa075feffa0ed0cfbdac" Nov 26 05:45:04 crc kubenswrapper[4871]: E1126 05:45:04.154118 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(906807e1-f724-4ab4-9ccc-95656188890e)\"" pod="openstack/watcher-decision-engine-0" podUID="906807e1-f724-4ab4-9ccc-95656188890e" Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.186057 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" event={"ID":"149882f2-ae3b-4571-a1ad-cbed765c2c77","Type":"ContainerDied","Data":"9a76c831945f2cbd3f87931d60a24eb9671a548cbc86644724c37f8e05d5ef33"} Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.186100 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a76c831945f2cbd3f87931d60a24eb9671a548cbc86644724c37f8e05d5ef33" Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.186163 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq" Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.199634 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f","Type":"ContainerStarted","Data":"2892acd953792c2dd869341034018fa9dd20429545f15b50f071077d8c05183a"} Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.201013 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.241346 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.708807047 podStartE2EDuration="5.241319758s" podCreationTimestamp="2025-11-26 05:44:59 +0000 UTC" firstStartedPulling="2025-11-26 05:44:59.96148595 +0000 UTC m=+1158.144537536" lastFinishedPulling="2025-11-26 05:45:03.493998661 +0000 UTC m=+1161.677050247" observedRunningTime="2025-11-26 05:45:04.226726956 +0000 UTC m=+1162.409778542" watchObservedRunningTime="2025-11-26 05:45:04.241319758 +0000 UTC m=+1162.424371344" Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.743761 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.744035 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.778218 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 05:45:04 crc kubenswrapper[4871]: I1126 05:45:04.790675 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 05:45:05 crc kubenswrapper[4871]: I1126 05:45:05.218275 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 05:45:05 crc kubenswrapper[4871]: I1126 05:45:05.218318 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 05:45:05 crc kubenswrapper[4871]: I1126 05:45:05.640856 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 05:45:05 crc kubenswrapper[4871]: I1126 05:45:05.641211 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 05:45:05 crc kubenswrapper[4871]: I1126 05:45:05.691706 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 05:45:05 crc kubenswrapper[4871]: I1126 05:45:05.716866 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 05:45:06 crc kubenswrapper[4871]: I1126 05:45:06.099221 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 05:45:06 crc kubenswrapper[4871]: I1126 05:45:06.224957 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 05:45:06 crc kubenswrapper[4871]: I1126 05:45:06.225014 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 05:45:06 crc 
kubenswrapper[4871]: I1126 05:45:06.447627 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Nov 26 05:45:06 crc kubenswrapper[4871]: I1126 05:45:06.971346 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8665945b44-wbcwv" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.163:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.163:8443: connect: connection refused"
Nov 26 05:45:06 crc kubenswrapper[4871]: I1126 05:45:06.971720 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8665945b44-wbcwv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.232594 4871 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.232617 4871 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.413629 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.413886 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="ceilometer-central-agent" containerID="cri-o://59273f91df935074b8f112bf9328da0e55926ca4ad0ec6d25f31b6dcfa2f5bde" gracePeriod=30
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.413958 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="proxy-httpd" containerID="cri-o://2892acd953792c2dd869341034018fa9dd20429545f15b50f071077d8c05183a" gracePeriod=30
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.414005 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="sg-core" containerID="cri-o://6d8ce20e186718d12d8e71c6c6f4df3f788fad04930913940cf0b5585ab721cd" gracePeriod=30
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.414054 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="ceilometer-notification-agent" containerID="cri-o://3d775446a8a741f53e6ba789e776178b1fabfe0339b847f393d993e6dadec0c2" gracePeriod=30
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.639898 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-78dd8485c9-fx6sv"]
Nov 26 05:45:07 crc kubenswrapper[4871]: E1126 05:45:07.640539 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="149882f2-ae3b-4571-a1ad-cbed765c2c77" containerName="collect-profiles"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.640553 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="149882f2-ae3b-4571-a1ad-cbed765c2c77" containerName="collect-profiles"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.640743 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="149882f2-ae3b-4571-a1ad-cbed765c2c77" containerName="collect-profiles"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.641734 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.651481 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.651671 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.651769 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.655841 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-78dd8485c9-fx6sv"]
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.737559 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fcca2594-c385-49cd-8354-7e4fcfab96c8-etc-swift\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.737607 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcca2594-c385-49cd-8354-7e4fcfab96c8-log-httpd\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.737646 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pn5np\" (UniqueName: \"kubernetes.io/projected/fcca2594-c385-49cd-8354-7e4fcfab96c8-kube-api-access-pn5np\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.737726 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-config-data\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.737754 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-internal-tls-certs\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.737771 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcca2594-c385-49cd-8354-7e4fcfab96c8-run-httpd\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.737788 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-combined-ca-bundle\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.737830 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-public-tls-certs\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.839175 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcca2594-c385-49cd-8354-7e4fcfab96c8-log-httpd\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.839313 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pn5np\" (UniqueName: \"kubernetes.io/projected/fcca2594-c385-49cd-8354-7e4fcfab96c8-kube-api-access-pn5np\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.839691 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcca2594-c385-49cd-8354-7e4fcfab96c8-log-httpd\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.839865 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-config-data\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.839926 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-internal-tls-certs\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.839969 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcca2594-c385-49cd-8354-7e4fcfab96c8-run-httpd\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.840344 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fcca2594-c385-49cd-8354-7e4fcfab96c8-run-httpd\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.839992 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-combined-ca-bundle\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.840671 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-public-tls-certs\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.840731 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fcca2594-c385-49cd-8354-7e4fcfab96c8-etc-swift\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.845634 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-internal-tls-certs\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.845884 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-config-data\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.849376 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-public-tls-certs\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.858021 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/fcca2594-c385-49cd-8354-7e4fcfab96c8-etc-swift\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.870286 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcca2594-c385-49cd-8354-7e4fcfab96c8-combined-ca-bundle\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:07 crc kubenswrapper[4871]: I1126 05:45:07.913215 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pn5np\" (UniqueName: \"kubernetes.io/projected/fcca2594-c385-49cd-8354-7e4fcfab96c8-kube-api-access-pn5np\") pod \"swift-proxy-78dd8485c9-fx6sv\" (UID: \"fcca2594-c385-49cd-8354-7e4fcfab96c8\") " pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.031578 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.267383 4871 generic.go:334] "Generic (PLEG): container finished" podID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerID="2892acd953792c2dd869341034018fa9dd20429545f15b50f071077d8c05183a" exitCode=0
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.267656 4871 generic.go:334] "Generic (PLEG): container finished" podID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerID="6d8ce20e186718d12d8e71c6c6f4df3f788fad04930913940cf0b5585ab721cd" exitCode=2
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.267665 4871 generic.go:334] "Generic (PLEG): container finished" podID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerID="3d775446a8a741f53e6ba789e776178b1fabfe0339b847f393d993e6dadec0c2" exitCode=0
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.267672 4871 generic.go:334] "Generic (PLEG): container finished" podID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerID="59273f91df935074b8f112bf9328da0e55926ca4ad0ec6d25f31b6dcfa2f5bde" exitCode=0
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.267690 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f","Type":"ContainerDied","Data":"2892acd953792c2dd869341034018fa9dd20429545f15b50f071077d8c05183a"}
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.267715 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f","Type":"ContainerDied","Data":"6d8ce20e186718d12d8e71c6c6f4df3f788fad04930913940cf0b5585ab721cd"}
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.267726 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f","Type":"ContainerDied","Data":"3d775446a8a741f53e6ba789e776178b1fabfe0339b847f393d993e6dadec0c2"}
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.267734 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f","Type":"ContainerDied","Data":"59273f91df935074b8f112bf9328da0e55926ca4ad0ec6d25f31b6dcfa2f5bde"}
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.373789 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.455072 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-run-httpd\") pod \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") "
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.455142 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxlzv\" (UniqueName: \"kubernetes.io/projected/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-kube-api-access-cxlzv\") pod \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") "
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.455267 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-config-data\") pod \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") "
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.455309 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-log-httpd\") pod \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") "
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.455324 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-combined-ca-bundle\") pod \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") "
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.455381 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-scripts\") pod \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") "
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.455452 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-sg-core-conf-yaml\") pod \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\" (UID: \"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f\") "
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.459732 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" (UID: "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.462174 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" (UID: "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.462326 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-kube-api-access-cxlzv" (OuterVolumeSpecName: "kube-api-access-cxlzv") pod "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" (UID: "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f"). InnerVolumeSpecName "kube-api-access-cxlzv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.473682 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-scripts" (OuterVolumeSpecName: "scripts") pod "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" (UID: "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.523033 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" (UID: "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.558725 4871 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.558761 4871 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.558771 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxlzv\" (UniqueName: \"kubernetes.io/projected/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-kube-api-access-cxlzv\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.558782 4871 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.558790 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.579621 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" (UID: "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.660086 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.679149 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-config-data" (OuterVolumeSpecName: "config-data") pod "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" (UID: "17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:08 crc kubenswrapper[4871]: W1126 05:45:08.692583 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfcca2594_c385_49cd_8354_7e4fcfab96c8.slice/crio-bdacbdd434ca9c68d45d7354ca10f48aa5b3c235aa27edaf10a41edec5e1b7e1 WatchSource:0}: Error finding container bdacbdd434ca9c68d45d7354ca10f48aa5b3c235aa27edaf10a41edec5e1b7e1: Status 404 returned error can't find the container with id bdacbdd434ca9c68d45d7354ca10f48aa5b3c235aa27edaf10a41edec5e1b7e1
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.764238 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.798540 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-78dd8485c9-fx6sv"]
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.863007 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.863392 4871 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.899144 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.899265 4871 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 05:45:08 crc kubenswrapper[4871]: I1126 05:45:08.940022 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.126095 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.305307 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-78dd8485c9-fx6sv" event={"ID":"fcca2594-c385-49cd-8354-7e4fcfab96c8","Type":"ContainerStarted","Data":"8152c8e3ab77e1a4ea2eaa51e438a637aee27460c7fc2da06f8678fe4b4004f6"}
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.305365 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-78dd8485c9-fx6sv" event={"ID":"fcca2594-c385-49cd-8354-7e4fcfab96c8","Type":"ContainerStarted","Data":"9790539fc471c0ec65efdbf8fc496c68d130812ccd35e75319aabb1e96c4e95b"}
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.305374 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-78dd8485c9-fx6sv" event={"ID":"fcca2594-c385-49cd-8354-7e4fcfab96c8","Type":"ContainerStarted","Data":"bdacbdd434ca9c68d45d7354ca10f48aa5b3c235aa27edaf10a41edec5e1b7e1"}
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.305639 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.305697 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.321844 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.323570 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f","Type":"ContainerDied","Data":"bb4548ebbb6f788ae347c83dcf4be12510c90d5fee4a6ac427edfc529540572e"}
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.323686 4871 scope.go:117] "RemoveContainer" containerID="2892acd953792c2dd869341034018fa9dd20429545f15b50f071077d8c05183a"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.327394 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-78dd8485c9-fx6sv" podStartSLOduration=2.327378094 podStartE2EDuration="2.327378094s" podCreationTimestamp="2025-11-26 05:45:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:45:09.322076314 +0000 UTC m=+1167.505127900" watchObservedRunningTime="2025-11-26 05:45:09.327378094 +0000 UTC m=+1167.510429680"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.384581 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.394656 4871 scope.go:117] "RemoveContainer" containerID="6d8ce20e186718d12d8e71c6c6f4df3f788fad04930913940cf0b5585ab721cd"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.397603 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.405354 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:09 crc kubenswrapper[4871]: E1126 05:45:09.405866 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="ceilometer-notification-agent"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.405890 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="ceilometer-notification-agent"
Nov 26 05:45:09 crc kubenswrapper[4871]: E1126 05:45:09.405906 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="proxy-httpd"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.405915 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="proxy-httpd"
Nov 26 05:45:09 crc kubenswrapper[4871]: E1126 05:45:09.405934 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="sg-core"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.405942 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="sg-core"
Nov 26 05:45:09 crc kubenswrapper[4871]: E1126 05:45:09.405962 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="ceilometer-central-agent"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.405969 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="ceilometer-central-agent"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.406212 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="ceilometer-central-agent"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.406250 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="proxy-httpd"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.406271 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="ceilometer-notification-agent"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.406282 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" containerName="sg-core"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.408064 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.410982 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.412792 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.419445 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.442744 4871 scope.go:117] "RemoveContainer" containerID="3d775446a8a741f53e6ba789e776178b1fabfe0339b847f393d993e6dadec0c2"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.479195 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-log-httpd\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.479237 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.479318 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-scripts\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.479354 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.479381 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-config-data\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.479405 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-run-httpd\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.479454 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmsdv\" (UniqueName: \"kubernetes.io/projected/5405fb24-87e0-4f32-ac6f-0f59338056ae-kube-api-access-nmsdv\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.497715 4871 scope.go:117] "RemoveContainer" containerID="59273f91df935074b8f112bf9328da0e55926ca4ad0ec6d25f31b6dcfa2f5bde"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.580836 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.580913 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-config-data\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.580935 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-run-httpd\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.580991 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmsdv\" (UniqueName: \"kubernetes.io/projected/5405fb24-87e0-4f32-ac6f-0f59338056ae-kube-api-access-nmsdv\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.581044 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-log-httpd\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.581066 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.581132 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-scripts\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.581902 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-run-httpd\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.583514 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-log-httpd\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.588625 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-config-data\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.589604 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.592436 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.600211 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmsdv\" (UniqueName: \"kubernetes.io/projected/5405fb24-87e0-4f32-ac6f-0f59338056ae-kube-api-access-nmsdv\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.601292 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-scripts\") pod \"ceilometer-0\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " pod="openstack/ceilometer-0"
Nov 26 05:45:09 crc kubenswrapper[4871]: I1126 05:45:09.739694 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:10 crc kubenswrapper[4871]: I1126 05:45:10.279363 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:10 crc kubenswrapper[4871]: I1126 05:45:10.333365 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5405fb24-87e0-4f32-ac6f-0f59338056ae","Type":"ContainerStarted","Data":"c38b6a399abdc6f6f6c1bf3fcb9e3ee05ddce33516652256db8771fc13d5b954"}
Nov 26 05:45:10 crc kubenswrapper[4871]: I1126 05:45:10.529063 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f" path="/var/lib/kubelet/pods/17f8cc63-2e7c-44da-a13d-5c3af9b7ec0f/volumes"
Nov 26 05:45:10 crc kubenswrapper[4871]: I1126 05:45:10.691392 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:11 crc kubenswrapper[4871]: I1126 05:45:11.343598 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5405fb24-87e0-4f32-ac6f-0f59338056ae","Type":"ContainerStarted","Data":"3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7"}
Nov 26 05:45:11 crc kubenswrapper[4871]: I1126 05:45:11.343913 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5405fb24-87e0-4f32-ac6f-0f59338056ae","Type":"ContainerStarted","Data":"d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2"}
Nov 26 05:45:11 crc kubenswrapper[4871]: I1126 05:45:11.873274 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-rswx2"]
Nov 26 05:45:11 crc kubenswrapper[4871]: I1126 05:45:11.875187 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-rswx2"
Nov 26 05:45:11 crc kubenswrapper[4871]: I1126 05:45:11.890500 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-rswx2"]
Nov 26 05:45:11 crc kubenswrapper[4871]: I1126 05:45:11.936355 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st47d\" (UniqueName: \"kubernetes.io/projected/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-kube-api-access-st47d\") pod \"nova-api-db-create-rswx2\" (UID: \"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc\") " pod="openstack/nova-api-db-create-rswx2"
Nov 26 05:45:11 crc kubenswrapper[4871]: I1126 05:45:11.936422 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-operator-scripts\") pod \"nova-api-db-create-rswx2\" (UID: \"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc\") " pod="openstack/nova-api-db-create-rswx2"
Nov 26 05:45:11 crc kubenswrapper[4871]: I1126 05:45:11.998471 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-rwvqt"]
Nov 26 05:45:11 crc kubenswrapper[4871]: I1126 05:45:11.999766 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-rwvqt"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.008665 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-c975-account-create-update-767bz"]
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.010561 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c975-account-create-update-767bz"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.012563 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.025907 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c975-account-create-update-767bz"]
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.038031 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st47d\" (UniqueName: \"kubernetes.io/projected/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-kube-api-access-st47d\") pod \"nova-api-db-create-rswx2\" (UID: \"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc\") " pod="openstack/nova-api-db-create-rswx2"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.038117 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-operator-scripts\") pod \"nova-api-db-create-rswx2\" (UID: \"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc\") " pod="openstack/nova-api-db-create-rswx2"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.038165 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e161c874-eca9-4f95-8419-660b27e5d21e-operator-scripts\") pod \"nova-cell0-db-create-rwvqt\" (UID: \"e161c874-eca9-4f95-8419-660b27e5d21e\") " pod="openstack/nova-cell0-db-create-rwvqt"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.038200 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/32032cf4-30d8-45c4-a12e-f1e79eda1c52-operator-scripts\") pod \"nova-api-c975-account-create-update-767bz\" (UID: \"32032cf4-30d8-45c4-a12e-f1e79eda1c52\") " pod="openstack/nova-api-c975-account-create-update-767bz"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.038234 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7qpl\" (UniqueName: \"kubernetes.io/projected/e161c874-eca9-4f95-8419-660b27e5d21e-kube-api-access-h7qpl\") pod \"nova-cell0-db-create-rwvqt\" (UID: \"e161c874-eca9-4f95-8419-660b27e5d21e\") " pod="openstack/nova-cell0-db-create-rwvqt"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.038330 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27gl7\" (UniqueName: \"kubernetes.io/projected/32032cf4-30d8-45c4-a12e-f1e79eda1c52-kube-api-access-27gl7\") pod \"nova-api-c975-account-create-update-767bz\" (UID: \"32032cf4-30d8-45c4-a12e-f1e79eda1c52\") " pod="openstack/nova-api-c975-account-create-update-767bz"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.039453 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-operator-scripts\") pod \"nova-api-db-create-rswx2\" (UID: \"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc\") " pod="openstack/nova-api-db-create-rswx2"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.041103 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-rwvqt"]
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.078238 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st47d\" (UniqueName: \"kubernetes.io/projected/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-kube-api-access-st47d\") pod \"nova-api-db-create-rswx2\" (UID: \"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc\") " pod="openstack/nova-api-db-create-rswx2"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.117781 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-fg97c"]
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.120300 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fg97c"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.128392 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-fg97c"]
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.140495 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27gl7\" (UniqueName: \"kubernetes.io/projected/32032cf4-30d8-45c4-a12e-f1e79eda1c52-kube-api-access-27gl7\") pod \"nova-api-c975-account-create-update-767bz\" (UID: \"32032cf4-30d8-45c4-a12e-f1e79eda1c52\") " pod="openstack/nova-api-c975-account-create-update-767bz"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.140761 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50e575b9-71c6-466e-993f-12c04b2834db-operator-scripts\") pod \"nova-cell1-db-create-fg97c\" (UID: \"50e575b9-71c6-466e-993f-12c04b2834db\") " pod="openstack/nova-cell1-db-create-fg97c"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.140936 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phptp\" (UniqueName: \"kubernetes.io/projected/50e575b9-71c6-466e-993f-12c04b2834db-kube-api-access-phptp\") pod \"nova-cell1-db-create-fg97c\" (UID: \"50e575b9-71c6-466e-993f-12c04b2834db\") " pod="openstack/nova-cell1-db-create-fg97c"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.141077 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e161c874-eca9-4f95-8419-660b27e5d21e-operator-scripts\") pod \"nova-cell0-db-create-rwvqt\" (UID: \"e161c874-eca9-4f95-8419-660b27e5d21e\") " pod="openstack/nova-cell0-db-create-rwvqt"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.141204 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/32032cf4-30d8-45c4-a12e-f1e79eda1c52-operator-scripts\") pod \"nova-api-c975-account-create-update-767bz\" (UID: \"32032cf4-30d8-45c4-a12e-f1e79eda1c52\") " pod="openstack/nova-api-c975-account-create-update-767bz"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.141969 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7qpl\" (UniqueName: \"kubernetes.io/projected/e161c874-eca9-4f95-8419-660b27e5d21e-kube-api-access-h7qpl\") pod \"nova-cell0-db-create-rwvqt\" (UID: \"e161c874-eca9-4f95-8419-660b27e5d21e\") " pod="openstack/nova-cell0-db-create-rwvqt"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.141972 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e161c874-eca9-4f95-8419-660b27e5d21e-operator-scripts\") pod \"nova-cell0-db-create-rwvqt\" (UID: \"e161c874-eca9-4f95-8419-660b27e5d21e\") " pod="openstack/nova-cell0-db-create-rwvqt"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.141917 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/32032cf4-30d8-45c4-a12e-f1e79eda1c52-operator-scripts\") pod \"nova-api-c975-account-create-update-767bz\" (UID: \"32032cf4-30d8-45c4-a12e-f1e79eda1c52\") " pod="openstack/nova-api-c975-account-create-update-767bz"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.159905 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27gl7\" (UniqueName: \"kubernetes.io/projected/32032cf4-30d8-45c4-a12e-f1e79eda1c52-kube-api-access-27gl7\") pod \"nova-api-c975-account-create-update-767bz\" (UID: \"32032cf4-30d8-45c4-a12e-f1e79eda1c52\") " pod="openstack/nova-api-c975-account-create-update-767bz"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.180370 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-38ab-account-create-update-5728z"]
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.181785 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-38ab-account-create-update-5728z"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.186739 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.187709 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7qpl\" (UniqueName: \"kubernetes.io/projected/e161c874-eca9-4f95-8419-660b27e5d21e-kube-api-access-h7qpl\") pod \"nova-cell0-db-create-rwvqt\" (UID: \"e161c874-eca9-4f95-8419-660b27e5d21e\") " pod="openstack/nova-cell0-db-create-rwvqt"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.191158 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-rswx2"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.195972 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-38ab-account-create-update-5728z"]
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.244699 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-operator-scripts\") pod \"nova-cell0-38ab-account-create-update-5728z\" (UID: \"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2\") " pod="openstack/nova-cell0-38ab-account-create-update-5728z"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.245038 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50e575b9-71c6-466e-993f-12c04b2834db-operator-scripts\") pod \"nova-cell1-db-create-fg97c\" (UID: \"50e575b9-71c6-466e-993f-12c04b2834db\") " pod="openstack/nova-cell1-db-create-fg97c"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.245205 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phptp\" (UniqueName: \"kubernetes.io/projected/50e575b9-71c6-466e-993f-12c04b2834db-kube-api-access-phptp\") pod \"nova-cell1-db-create-fg97c\" (UID: \"50e575b9-71c6-466e-993f-12c04b2834db\") " pod="openstack/nova-cell1-db-create-fg97c"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.245364 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn7br\" (UniqueName: \"kubernetes.io/projected/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-kube-api-access-xn7br\") pod \"nova-cell0-38ab-account-create-update-5728z\" (UID: \"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2\") " pod="openstack/nova-cell0-38ab-account-create-update-5728z"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.246652 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50e575b9-71c6-466e-993f-12c04b2834db-operator-scripts\") pod \"nova-cell1-db-create-fg97c\" (UID: \"50e575b9-71c6-466e-993f-12c04b2834db\") " pod="openstack/nova-cell1-db-create-fg97c"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.265415 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phptp\" (UniqueName: \"kubernetes.io/projected/50e575b9-71c6-466e-993f-12c04b2834db-kube-api-access-phptp\") pod \"nova-cell1-db-create-fg97c\" (UID: \"50e575b9-71c6-466e-993f-12c04b2834db\") " pod="openstack/nova-cell1-db-create-fg97c"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.328396 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-rwvqt"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.341910 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c975-account-create-update-767bz"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.346658 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn7br\" (UniqueName: \"kubernetes.io/projected/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-kube-api-access-xn7br\") pod \"nova-cell0-38ab-account-create-update-5728z\" (UID: \"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2\") " pod="openstack/nova-cell0-38ab-account-create-update-5728z"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.346792 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-operator-scripts\") pod \"nova-cell0-38ab-account-create-update-5728z\" (UID: \"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2\") " pod="openstack/nova-cell0-38ab-account-create-update-5728z"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.347590 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-operator-scripts\") pod \"nova-cell0-38ab-account-create-update-5728z\" (UID: \"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2\") " pod="openstack/nova-cell0-38ab-account-create-update-5728z"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.378697 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn7br\" (UniqueName: \"kubernetes.io/projected/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-kube-api-access-xn7br\") pod \"nova-cell0-38ab-account-create-update-5728z\" (UID: \"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2\") " pod="openstack/nova-cell0-38ab-account-create-update-5728z"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.396625 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-5174-account-create-update-j9rcn"]
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.398850 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5174-account-create-update-j9rcn"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.401113 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.448553 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4rfx\" (UniqueName: \"kubernetes.io/projected/b91e09cf-5ae8-4288-a212-d52274d5ef05-kube-api-access-v4rfx\") pod \"nova-cell1-5174-account-create-update-j9rcn\" (UID: \"b91e09cf-5ae8-4288-a212-d52274d5ef05\") " pod="openstack/nova-cell1-5174-account-create-update-j9rcn"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.448880 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b91e09cf-5ae8-4288-a212-d52274d5ef05-operator-scripts\") pod \"nova-cell1-5174-account-create-update-j9rcn\" (UID: \"b91e09cf-5ae8-4288-a212-d52274d5ef05\") " pod="openstack/nova-cell1-5174-account-create-update-j9rcn"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.453054 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5174-account-create-update-j9rcn"]
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.463200 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fg97c"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.551175 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4rfx\" (UniqueName: \"kubernetes.io/projected/b91e09cf-5ae8-4288-a212-d52274d5ef05-kube-api-access-v4rfx\") pod \"nova-cell1-5174-account-create-update-j9rcn\" (UID: \"b91e09cf-5ae8-4288-a212-d52274d5ef05\") " pod="openstack/nova-cell1-5174-account-create-update-j9rcn"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.551298 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b91e09cf-5ae8-4288-a212-d52274d5ef05-operator-scripts\") pod \"nova-cell1-5174-account-create-update-j9rcn\" (UID: \"b91e09cf-5ae8-4288-a212-d52274d5ef05\") " pod="openstack/nova-cell1-5174-account-create-update-j9rcn"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.552104 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b91e09cf-5ae8-4288-a212-d52274d5ef05-operator-scripts\") pod \"nova-cell1-5174-account-create-update-j9rcn\" (UID: \"b91e09cf-5ae8-4288-a212-d52274d5ef05\") " pod="openstack/nova-cell1-5174-account-create-update-j9rcn"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.569053 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-38ab-account-create-update-5728z"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.569098 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4rfx\" (UniqueName: \"kubernetes.io/projected/b91e09cf-5ae8-4288-a212-d52274d5ef05-kube-api-access-v4rfx\") pod \"nova-cell1-5174-account-create-update-j9rcn\" (UID: \"b91e09cf-5ae8-4288-a212-d52274d5ef05\") " pod="openstack/nova-cell1-5174-account-create-update-j9rcn"
Nov 26 05:45:12 crc kubenswrapper[4871]: I1126 05:45:12.737735 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5174-account-create-update-j9rcn"
Nov 26 05:45:13 crc kubenswrapper[4871]: I1126 05:45:13.042330 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:14 crc kubenswrapper[4871]: I1126 05:45:14.152844 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:14 crc kubenswrapper[4871]: I1126 05:45:14.153196 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:14 crc kubenswrapper[4871]: I1126 05:45:14.154154 4871 scope.go:117] "RemoveContainer" containerID="c0dee9dbf5b0d8070947de7e352e4d9d380476356ecdfa075feffa0ed0cfbdac"
Nov 26 05:45:14 crc kubenswrapper[4871]: E1126 05:45:14.154393 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with CrashLoopBackOff: \"back-off 20s restarting failed container=watcher-decision-engine pod=watcher-decision-engine-0_openstack(906807e1-f724-4ab4-9ccc-95656188890e)\"" pod="openstack/watcher-decision-engine-0" podUID="906807e1-f724-4ab4-9ccc-95656188890e"
Nov 26 05:45:15 crc kubenswrapper[4871]: I1126 05:45:15.404578 4871 generic.go:334] "Generic (PLEG): container finished" podID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerID="2f753b791176f79d2770b9ecafcbce795dfc8ecb07673c67a3c4df5b21ef9c16" exitCode=137
Nov 26 05:45:15 crc kubenswrapper[4871]: I1126 05:45:15.404901 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8665945b44-wbcwv" event={"ID":"812fa0f1-c216-4db1-b3e6-cfa862b8cb93","Type":"ContainerDied","Data":"2f753b791176f79d2770b9ecafcbce795dfc8ecb07673c67a3c4df5b21ef9c16"}
Nov 26 05:45:16 crc kubenswrapper[4871]: I1126 05:45:16.971070 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-8665945b44-wbcwv" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.163:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.163:8443: connect: connection refused"
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.024020 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8665945b44-wbcwv"
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.039572 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-78dd8485c9-fx6sv"
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.129490 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-combined-ca-bundle\") pod \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") "
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.129666 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-scripts\") pod \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") "
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.129705 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-config-data\") pod \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") "
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.129734 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdjvn\" (UniqueName: \"kubernetes.io/projected/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-kube-api-access-fdjvn\") pod \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") "
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.129817 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-secret-key\") pod \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") "
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.129910 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-logs\") pod \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") "
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.129952 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-tls-certs\") pod \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\" (UID: \"812fa0f1-c216-4db1-b3e6-cfa862b8cb93\") "
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.137258 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-logs" (OuterVolumeSpecName: "logs") pod "812fa0f1-c216-4db1-b3e6-cfa862b8cb93" (UID: "812fa0f1-c216-4db1-b3e6-cfa862b8cb93"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.137403 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-kube-api-access-fdjvn" (OuterVolumeSpecName: "kube-api-access-fdjvn") pod "812fa0f1-c216-4db1-b3e6-cfa862b8cb93" (UID: "812fa0f1-c216-4db1-b3e6-cfa862b8cb93"). InnerVolumeSpecName "kube-api-access-fdjvn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.137711 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "812fa0f1-c216-4db1-b3e6-cfa862b8cb93" (UID: "812fa0f1-c216-4db1-b3e6-cfa862b8cb93"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.163799 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-config-data" (OuterVolumeSpecName: "config-data") pod "812fa0f1-c216-4db1-b3e6-cfa862b8cb93" (UID: "812fa0f1-c216-4db1-b3e6-cfa862b8cb93"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.190510 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "812fa0f1-c216-4db1-b3e6-cfa862b8cb93" (UID: "812fa0f1-c216-4db1-b3e6-cfa862b8cb93"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.191031 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-scripts" (OuterVolumeSpecName: "scripts") pod "812fa0f1-c216-4db1-b3e6-cfa862b8cb93" (UID: "812fa0f1-c216-4db1-b3e6-cfa862b8cb93"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.216725 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "812fa0f1-c216-4db1-b3e6-cfa862b8cb93" (UID: "812fa0f1-c216-4db1-b3e6-cfa862b8cb93"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.233918 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.233950 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.233961 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdjvn\" (UniqueName: \"kubernetes.io/projected/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-kube-api-access-fdjvn\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.233974 4871 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.233983 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-logs\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.233992 4871 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-horizon-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.234000 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/812fa0f1-c216-4db1-b3e6-cfa862b8cb93-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.432047 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"7218b9c9-2508-46eb-8942-4c22b0c706cf","Type":"ContainerStarted","Data":"973f7de88a7c62d8201a29eb415d0702feada34ddffc286dcd3863930b388cab"}
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.437811 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5405fb24-87e0-4f32-ac6f-0f59338056ae","Type":"ContainerStarted","Data":"5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367"}
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.444374 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8665945b44-wbcwv" event={"ID":"812fa0f1-c216-4db1-b3e6-cfa862b8cb93","Type":"ContainerDied","Data":"157a44f46232c0458ca98556cb193e81b3368d1f99c8c97c9fd4a0bbe2a2a9a7"}
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.444425 4871 scope.go:117] "RemoveContainer" containerID="c136384f154b1d112425776e2afc3e7ec4248dce8ee44c4fa5505218badfb75c"
Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.444564 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-8665945b44-wbcwv" Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.488517 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.449313784 podStartE2EDuration="17.488495713s" podCreationTimestamp="2025-11-26 05:45:01 +0000 UTC" firstStartedPulling="2025-11-26 05:45:02.727869995 +0000 UTC m=+1160.910921581" lastFinishedPulling="2025-11-26 05:45:17.767051924 +0000 UTC m=+1175.950103510" observedRunningTime="2025-11-26 05:45:18.459685757 +0000 UTC m=+1176.642737383" watchObservedRunningTime="2025-11-26 05:45:18.488495713 +0000 UTC m=+1176.671547319" Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.503719 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-fg97c"] Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.532675 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8665945b44-wbcwv"] Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.532870 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-38ab-account-create-update-5728z"] Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.547297 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-8665945b44-wbcwv"] Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.553870 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-rwvqt"] Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.561794 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-rswx2"] Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.569427 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-5174-account-create-update-j9rcn"] Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.648789 4871 scope.go:117] "RemoveContainer" containerID="2f753b791176f79d2770b9ecafcbce795dfc8ecb07673c67a3c4df5b21ef9c16" Nov 26 05:45:18 crc kubenswrapper[4871]: W1126 05:45:18.655926 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc22a7bb_6c3c_44cb_b37c_7d31cab8b3d2.slice/crio-baa17c754524a11fdb02d4994ef582ce6ec2824fc40ab1f4f83e7abcd369f0d5 WatchSource:0}: Error finding container baa17c754524a11fdb02d4994ef582ce6ec2824fc40ab1f4f83e7abcd369f0d5: Status 404 returned error can't find the container with id baa17c754524a11fdb02d4994ef582ce6ec2824fc40ab1f4f83e7abcd369f0d5 Nov 26 05:45:18 crc kubenswrapper[4871]: I1126 05:45:18.732941 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c975-account-create-update-767bz"] Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.475957 4871 generic.go:334] "Generic (PLEG): container finished" podID="50e575b9-71c6-466e-993f-12c04b2834db" containerID="51e4c2a3c34347cc9248d4b1233df6cfaf2e3cb0192befb46dc1e2d0dfb1190a" exitCode=0 Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.476053 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fg97c" event={"ID":"50e575b9-71c6-466e-993f-12c04b2834db","Type":"ContainerDied","Data":"51e4c2a3c34347cc9248d4b1233df6cfaf2e3cb0192befb46dc1e2d0dfb1190a"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.476296 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fg97c" 
event={"ID":"50e575b9-71c6-466e-993f-12c04b2834db","Type":"ContainerStarted","Data":"b4abeaef631d11ad9af14fb49f71201f5cf9ba16192dc8e75f96b0ab7ff48793"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.479245 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rswx2" event={"ID":"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc","Type":"ContainerStarted","Data":"6849d4a873e1e5d2caa159f5a3c1ade14bdc815457cee33ed53a41427dd223be"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.479265 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rswx2" event={"ID":"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc","Type":"ContainerStarted","Data":"890e8d114057f68558b18c9662052006caff683f24173b9e2aedc470d98fc7f9"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.481330 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5174-account-create-update-j9rcn" event={"ID":"b91e09cf-5ae8-4288-a212-d52274d5ef05","Type":"ContainerStarted","Data":"173ab77996bfa5ec292feb115b28f5e6425124aff186a3bb2422bbca36d975df"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.481417 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5174-account-create-update-j9rcn" event={"ID":"b91e09cf-5ae8-4288-a212-d52274d5ef05","Type":"ContainerStarted","Data":"b10f203ece5b489d1be2e4a320519b644bfd4aa69d867589327a38cfc734bbde"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.482679 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c975-account-create-update-767bz" event={"ID":"32032cf4-30d8-45c4-a12e-f1e79eda1c52","Type":"ContainerStarted","Data":"c20c3c89269396c982f3c2b18f81f453cf0b902607d7efaafef8ce3eed1c4acd"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.482704 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c975-account-create-update-767bz" event={"ID":"32032cf4-30d8-45c4-a12e-f1e79eda1c52","Type":"ContainerStarted","Data":"ff43108f14f2ae331376f71d3279bfdbf0a5be6cfc888e4f1a91a37594fe8dac"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.484855 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-rwvqt" event={"ID":"e161c874-eca9-4f95-8419-660b27e5d21e","Type":"ContainerStarted","Data":"5b086dbe6d815cb7b39d7cbbe9bbf31136fc2199d8f5f03f88cf1d73066649db"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.484880 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-rwvqt" event={"ID":"e161c874-eca9-4f95-8419-660b27e5d21e","Type":"ContainerStarted","Data":"8f8b37dcdaf1085dc7c1868368b486ab4a935503cf81f95531c3baf44ce6e8e7"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.487032 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-38ab-account-create-update-5728z" event={"ID":"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2","Type":"ContainerStarted","Data":"92a185927ee0ba29556a3c7ac25362b8286f1ffca3c6c6170f225b7bdd89316e"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.487059 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-38ab-account-create-update-5728z" event={"ID":"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2","Type":"ContainerStarted","Data":"baa17c754524a11fdb02d4994ef582ce6ec2824fc40ab1f4f83e7abcd369f0d5"} Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.520431 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-cell0-38ab-account-create-update-5728z" podStartSLOduration=7.520406086 podStartE2EDuration="7.520406086s" podCreationTimestamp="2025-11-26 05:45:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:45:19.50834523 +0000 UTC m=+1177.691396826" watchObservedRunningTime="2025-11-26 05:45:19.520406086 +0000 UTC m=+1177.703457672" Nov 26 05:45:19 crc kubenswrapper[4871]: I1126 05:45:19.534497 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-c975-account-create-update-767bz" podStartSLOduration=8.534473261 podStartE2EDuration="8.534473261s" podCreationTimestamp="2025-11-26 05:45:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:45:19.526633448 +0000 UTC m=+1177.709685034" watchObservedRunningTime="2025-11-26 05:45:19.534473261 +0000 UTC m=+1177.717524847" Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.498661 4871 generic.go:334] "Generic (PLEG): container finished" podID="32032cf4-30d8-45c4-a12e-f1e79eda1c52" containerID="c20c3c89269396c982f3c2b18f81f453cf0b902607d7efaafef8ce3eed1c4acd" exitCode=0 Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.498769 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c975-account-create-update-767bz" event={"ID":"32032cf4-30d8-45c4-a12e-f1e79eda1c52","Type":"ContainerDied","Data":"c20c3c89269396c982f3c2b18f81f453cf0b902607d7efaafef8ce3eed1c4acd"} Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.502787 4871 generic.go:334] "Generic (PLEG): container finished" podID="e161c874-eca9-4f95-8419-660b27e5d21e" containerID="5b086dbe6d815cb7b39d7cbbe9bbf31136fc2199d8f5f03f88cf1d73066649db" exitCode=0 Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.502875 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-rwvqt" event={"ID":"e161c874-eca9-4f95-8419-660b27e5d21e","Type":"ContainerDied","Data":"5b086dbe6d815cb7b39d7cbbe9bbf31136fc2199d8f5f03f88cf1d73066649db"} Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.506065 4871 generic.go:334] "Generic (PLEG): container finished" podID="cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2" containerID="92a185927ee0ba29556a3c7ac25362b8286f1ffca3c6c6170f225b7bdd89316e" exitCode=0 Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.506207 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-38ab-account-create-update-5728z" event={"ID":"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2","Type":"ContainerDied","Data":"92a185927ee0ba29556a3c7ac25362b8286f1ffca3c6c6170f225b7bdd89316e"} Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.513206 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="ceilometer-central-agent" containerID="cri-o://d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2" gracePeriod=30 Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.513265 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="sg-core" containerID="cri-o://5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367" gracePeriod=30 Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.513282 4871 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="ceilometer-notification-agent" containerID="cri-o://3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7" gracePeriod=30 Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.513298 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="proxy-httpd" containerID="cri-o://7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3" gracePeriod=30 Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.524161 4871 generic.go:334] "Generic (PLEG): container finished" podID="7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc" containerID="6849d4a873e1e5d2caa159f5a3c1ade14bdc815457cee33ed53a41427dd223be" exitCode=0 Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.529783 4871 generic.go:334] "Generic (PLEG): container finished" podID="b91e09cf-5ae8-4288-a212-d52274d5ef05" containerID="173ab77996bfa5ec292feb115b28f5e6425124aff186a3bb2422bbca36d975df" exitCode=0 Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.578285 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.49681543 podStartE2EDuration="11.578267855s" podCreationTimestamp="2025-11-26 05:45:09 +0000 UTC" firstStartedPulling="2025-11-26 05:45:10.298024415 +0000 UTC m=+1168.481076041" lastFinishedPulling="2025-11-26 05:45:19.37947688 +0000 UTC m=+1177.562528466" observedRunningTime="2025-11-26 05:45:20.557777102 +0000 UTC m=+1178.740828698" watchObservedRunningTime="2025-11-26 05:45:20.578267855 +0000 UTC m=+1178.761319441" Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.584942 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" path="/var/lib/kubelet/pods/812fa0f1-c216-4db1-b3e6-cfa862b8cb93/volumes" Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.585626 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5405fb24-87e0-4f32-ac6f-0f59338056ae","Type":"ContainerStarted","Data":"7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3"} Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.585706 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.585720 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rswx2" event={"ID":"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc","Type":"ContainerDied","Data":"6849d4a873e1e5d2caa159f5a3c1ade14bdc815457cee33ed53a41427dd223be"} Nov 26 05:45:20 crc kubenswrapper[4871]: I1126 05:45:20.585735 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5174-account-create-update-j9rcn" event={"ID":"b91e09cf-5ae8-4288-a212-d52274d5ef05","Type":"ContainerDied","Data":"173ab77996bfa5ec292feb115b28f5e6425124aff186a3bb2422bbca36d975df"} Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.113342 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-rwvqt" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.121259 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-rswx2" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.135988 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5174-account-create-update-j9rcn" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.143728 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fg97c" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.203237 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7qpl\" (UniqueName: \"kubernetes.io/projected/e161c874-eca9-4f95-8419-660b27e5d21e-kube-api-access-h7qpl\") pod \"e161c874-eca9-4f95-8419-660b27e5d21e\" (UID: \"e161c874-eca9-4f95-8419-660b27e5d21e\") " Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.203362 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b91e09cf-5ae8-4288-a212-d52274d5ef05-operator-scripts\") pod \"b91e09cf-5ae8-4288-a212-d52274d5ef05\" (UID: \"b91e09cf-5ae8-4288-a212-d52274d5ef05\") " Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.203413 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e161c874-eca9-4f95-8419-660b27e5d21e-operator-scripts\") pod \"e161c874-eca9-4f95-8419-660b27e5d21e\" (UID: \"e161c874-eca9-4f95-8419-660b27e5d21e\") " Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.203473 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-operator-scripts\") pod \"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc\" (UID: \"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc\") " Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.203542 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-st47d\" (UniqueName: \"kubernetes.io/projected/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-kube-api-access-st47d\") pod \"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc\" (UID: \"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc\") " Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.203610 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4rfx\" (UniqueName: \"kubernetes.io/projected/b91e09cf-5ae8-4288-a212-d52274d5ef05-kube-api-access-v4rfx\") pod \"b91e09cf-5ae8-4288-a212-d52274d5ef05\" (UID: \"b91e09cf-5ae8-4288-a212-d52274d5ef05\") " Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.204504 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc" (UID: "7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.204917 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b91e09cf-5ae8-4288-a212-d52274d5ef05-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b91e09cf-5ae8-4288-a212-d52274d5ef05" (UID: "b91e09cf-5ae8-4288-a212-d52274d5ef05"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.205376 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e161c874-eca9-4f95-8419-660b27e5d21e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e161c874-eca9-4f95-8419-660b27e5d21e" (UID: "e161c874-eca9-4f95-8419-660b27e5d21e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.209785 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e161c874-eca9-4f95-8419-660b27e5d21e-kube-api-access-h7qpl" (OuterVolumeSpecName: "kube-api-access-h7qpl") pod "e161c874-eca9-4f95-8419-660b27e5d21e" (UID: "e161c874-eca9-4f95-8419-660b27e5d21e"). InnerVolumeSpecName "kube-api-access-h7qpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.209836 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b91e09cf-5ae8-4288-a212-d52274d5ef05-kube-api-access-v4rfx" (OuterVolumeSpecName: "kube-api-access-v4rfx") pod "b91e09cf-5ae8-4288-a212-d52274d5ef05" (UID: "b91e09cf-5ae8-4288-a212-d52274d5ef05"). InnerVolumeSpecName "kube-api-access-v4rfx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.210075 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-kube-api-access-st47d" (OuterVolumeSpecName: "kube-api-access-st47d") pod "7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc" (UID: "7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc"). InnerVolumeSpecName "kube-api-access-st47d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.305451 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-phptp\" (UniqueName: \"kubernetes.io/projected/50e575b9-71c6-466e-993f-12c04b2834db-kube-api-access-phptp\") pod \"50e575b9-71c6-466e-993f-12c04b2834db\" (UID: \"50e575b9-71c6-466e-993f-12c04b2834db\") " Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.305543 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50e575b9-71c6-466e-993f-12c04b2834db-operator-scripts\") pod \"50e575b9-71c6-466e-993f-12c04b2834db\" (UID: \"50e575b9-71c6-466e-993f-12c04b2834db\") " Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.305917 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.305934 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-st47d\" (UniqueName: \"kubernetes.io/projected/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc-kube-api-access-st47d\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.305944 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4rfx\" (UniqueName: \"kubernetes.io/projected/b91e09cf-5ae8-4288-a212-d52274d5ef05-kube-api-access-v4rfx\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.305953 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7qpl\" (UniqueName: \"kubernetes.io/projected/e161c874-eca9-4f95-8419-660b27e5d21e-kube-api-access-h7qpl\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.305962 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b91e09cf-5ae8-4288-a212-d52274d5ef05-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.305970 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e161c874-eca9-4f95-8419-660b27e5d21e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.306274 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50e575b9-71c6-466e-993f-12c04b2834db-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "50e575b9-71c6-466e-993f-12c04b2834db" (UID: "50e575b9-71c6-466e-993f-12c04b2834db"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.310161 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50e575b9-71c6-466e-993f-12c04b2834db-kube-api-access-phptp" (OuterVolumeSpecName: "kube-api-access-phptp") pod "50e575b9-71c6-466e-993f-12c04b2834db" (UID: "50e575b9-71c6-466e-993f-12c04b2834db"). InnerVolumeSpecName "kube-api-access-phptp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.407676 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-phptp\" (UniqueName: \"kubernetes.io/projected/50e575b9-71c6-466e-993f-12c04b2834db-kube-api-access-phptp\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.407710 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/50e575b9-71c6-466e-993f-12c04b2834db-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.544235 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fg97c" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.544222 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fg97c" event={"ID":"50e575b9-71c6-466e-993f-12c04b2834db","Type":"ContainerDied","Data":"b4abeaef631d11ad9af14fb49f71201f5cf9ba16192dc8e75f96b0ab7ff48793"} Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.544453 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4abeaef631d11ad9af14fb49f71201f5cf9ba16192dc8e75f96b0ab7ff48793" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.548850 4871 generic.go:334] "Generic (PLEG): container finished" podID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerID="7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3" exitCode=0 Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.549113 4871 generic.go:334] "Generic (PLEG): container finished" podID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerID="5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367" exitCode=2 Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.549225 4871 generic.go:334] "Generic (PLEG): container finished" podID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerID="d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2" exitCode=0 Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.548943 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5405fb24-87e0-4f32-ac6f-0f59338056ae","Type":"ContainerDied","Data":"7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3"} Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.549549 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5405fb24-87e0-4f32-ac6f-0f59338056ae","Type":"ContainerDied","Data":"5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367"} Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.549657 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5405fb24-87e0-4f32-ac6f-0f59338056ae","Type":"ContainerDied","Data":"d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2"} Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.552248 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-rswx2" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.552909 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-rswx2" event={"ID":"7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc","Type":"ContainerDied","Data":"890e8d114057f68558b18c9662052006caff683f24173b9e2aedc470d98fc7f9"} Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.553034 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="890e8d114057f68558b18c9662052006caff683f24173b9e2aedc470d98fc7f9" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.555325 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-5174-account-create-update-j9rcn" event={"ID":"b91e09cf-5ae8-4288-a212-d52274d5ef05","Type":"ContainerDied","Data":"b10f203ece5b489d1be2e4a320519b644bfd4aa69d867589327a38cfc734bbde"} Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.555365 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b10f203ece5b489d1be2e4a320519b644bfd4aa69d867589327a38cfc734bbde" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.555364 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-5174-account-create-update-j9rcn" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.561097 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-rwvqt" event={"ID":"e161c874-eca9-4f95-8419-660b27e5d21e","Type":"ContainerDied","Data":"8f8b37dcdaf1085dc7c1868368b486ab4a935503cf81f95531c3baf44ce6e8e7"} Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.564188 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f8b37dcdaf1085dc7c1868368b486ab4a935503cf81f95531c3baf44ce6e8e7" Nov 26 05:45:21 crc kubenswrapper[4871]: I1126 05:45:21.561205 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-rwvqt" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.035300 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-38ab-account-create-update-5728z" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.040753 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-c975-account-create-update-767bz" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.122659 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/32032cf4-30d8-45c4-a12e-f1e79eda1c52-operator-scripts\") pod \"32032cf4-30d8-45c4-a12e-f1e79eda1c52\" (UID: \"32032cf4-30d8-45c4-a12e-f1e79eda1c52\") " Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.122710 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27gl7\" (UniqueName: \"kubernetes.io/projected/32032cf4-30d8-45c4-a12e-f1e79eda1c52-kube-api-access-27gl7\") pod \"32032cf4-30d8-45c4-a12e-f1e79eda1c52\" (UID: \"32032cf4-30d8-45c4-a12e-f1e79eda1c52\") " Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.122901 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xn7br\" (UniqueName: \"kubernetes.io/projected/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-kube-api-access-xn7br\") pod \"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2\" (UID: \"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2\") " Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.122943 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-operator-scripts\") pod \"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2\" (UID: \"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2\") " Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.123135 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32032cf4-30d8-45c4-a12e-f1e79eda1c52-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "32032cf4-30d8-45c4-a12e-f1e79eda1c52" (UID: "32032cf4-30d8-45c4-a12e-f1e79eda1c52"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.123517 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/32032cf4-30d8-45c4-a12e-f1e79eda1c52-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.124729 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2" (UID: "cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.137490 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-kube-api-access-xn7br" (OuterVolumeSpecName: "kube-api-access-xn7br") pod "cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2" (UID: "cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2"). InnerVolumeSpecName "kube-api-access-xn7br". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.148610 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32032cf4-30d8-45c4-a12e-f1e79eda1c52-kube-api-access-27gl7" (OuterVolumeSpecName: "kube-api-access-27gl7") pod "32032cf4-30d8-45c4-a12e-f1e79eda1c52" (UID: "32032cf4-30d8-45c4-a12e-f1e79eda1c52"). 
InnerVolumeSpecName "kube-api-access-27gl7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.224732 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27gl7\" (UniqueName: \"kubernetes.io/projected/32032cf4-30d8-45c4-a12e-f1e79eda1c52-kube-api-access-27gl7\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.224757 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xn7br\" (UniqueName: \"kubernetes.io/projected/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-kube-api-access-xn7br\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.224790 4871 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.417492 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.540180 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-run-httpd\") pod \"5405fb24-87e0-4f32-ac6f-0f59338056ae\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.540269 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-config-data\") pod \"5405fb24-87e0-4f32-ac6f-0f59338056ae\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.540346 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-combined-ca-bundle\") pod \"5405fb24-87e0-4f32-ac6f-0f59338056ae\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.540411 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-scripts\") pod \"5405fb24-87e0-4f32-ac6f-0f59338056ae\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.540517 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-sg-core-conf-yaml\") pod \"5405fb24-87e0-4f32-ac6f-0f59338056ae\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.540591 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmsdv\" (UniqueName: \"kubernetes.io/projected/5405fb24-87e0-4f32-ac6f-0f59338056ae-kube-api-access-nmsdv\") pod \"5405fb24-87e0-4f32-ac6f-0f59338056ae\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.540673 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-log-httpd\") pod \"5405fb24-87e0-4f32-ac6f-0f59338056ae\" (UID: 
\"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.540826 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5405fb24-87e0-4f32-ac6f-0f59338056ae" (UID: "5405fb24-87e0-4f32-ac6f-0f59338056ae"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.541318 4871 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.541740 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5405fb24-87e0-4f32-ac6f-0f59338056ae" (UID: "5405fb24-87e0-4f32-ac6f-0f59338056ae"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.547559 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5405fb24-87e0-4f32-ac6f-0f59338056ae-kube-api-access-nmsdv" (OuterVolumeSpecName: "kube-api-access-nmsdv") pod "5405fb24-87e0-4f32-ac6f-0f59338056ae" (UID: "5405fb24-87e0-4f32-ac6f-0f59338056ae"). InnerVolumeSpecName "kube-api-access-nmsdv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.557851 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-scripts" (OuterVolumeSpecName: "scripts") pod "5405fb24-87e0-4f32-ac6f-0f59338056ae" (UID: "5405fb24-87e0-4f32-ac6f-0f59338056ae"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.577076 4871 generic.go:334] "Generic (PLEG): container finished" podID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerID="3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7" exitCode=0 Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.577144 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.577152 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5405fb24-87e0-4f32-ac6f-0f59338056ae","Type":"ContainerDied","Data":"3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7"} Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.577253 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5405fb24-87e0-4f32-ac6f-0f59338056ae","Type":"ContainerDied","Data":"c38b6a399abdc6f6f6c1bf3fcb9e3ee05ddce33516652256db8771fc13d5b954"} Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.577272 4871 scope.go:117] "RemoveContainer" containerID="7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.578918 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c975-account-create-update-767bz" event={"ID":"32032cf4-30d8-45c4-a12e-f1e79eda1c52","Type":"ContainerDied","Data":"ff43108f14f2ae331376f71d3279bfdbf0a5be6cfc888e4f1a91a37594fe8dac"} Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.578976 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff43108f14f2ae331376f71d3279bfdbf0a5be6cfc888e4f1a91a37594fe8dac" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.578929 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c975-account-create-update-767bz" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.579077 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5405fb24-87e0-4f32-ac6f-0f59338056ae" (UID: "5405fb24-87e0-4f32-ac6f-0f59338056ae"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.582414 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-38ab-account-create-update-5728z" event={"ID":"cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2","Type":"ContainerDied","Data":"baa17c754524a11fdb02d4994ef582ce6ec2824fc40ab1f4f83e7abcd369f0d5"} Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.582449 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-38ab-account-create-update-5728z" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.582454 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="baa17c754524a11fdb02d4994ef582ce6ec2824fc40ab1f4f83e7abcd369f0d5" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.597030 4871 scope.go:117] "RemoveContainer" containerID="5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.616291 4871 scope.go:117] "RemoveContainer" containerID="3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.637703 4871 scope.go:117] "RemoveContainer" containerID="d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.642192 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5405fb24-87e0-4f32-ac6f-0f59338056ae" (UID: "5405fb24-87e0-4f32-ac6f-0f59338056ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.642310 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-combined-ca-bundle\") pod \"5405fb24-87e0-4f32-ac6f-0f59338056ae\" (UID: \"5405fb24-87e0-4f32-ac6f-0f59338056ae\") " Nov 26 05:45:22 crc kubenswrapper[4871]: W1126 05:45:22.642582 4871 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/5405fb24-87e0-4f32-ac6f-0f59338056ae/volumes/kubernetes.io~secret/combined-ca-bundle Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.642601 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5405fb24-87e0-4f32-ac6f-0f59338056ae" (UID: "5405fb24-87e0-4f32-ac6f-0f59338056ae"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.643123 4871 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.643148 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmsdv\" (UniqueName: \"kubernetes.io/projected/5405fb24-87e0-4f32-ac6f-0f59338056ae-kube-api-access-nmsdv\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.643163 4871 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5405fb24-87e0-4f32-ac6f-0f59338056ae-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.643176 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.643187 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.677100 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-config-data" (OuterVolumeSpecName: "config-data") pod "5405fb24-87e0-4f32-ac6f-0f59338056ae" (UID: "5405fb24-87e0-4f32-ac6f-0f59338056ae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.745407 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5405fb24-87e0-4f32-ac6f-0f59338056ae-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.756138 4871 scope.go:117] "RemoveContainer" containerID="7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3" Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.756547 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3\": container with ID starting with 7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3 not found: ID does not exist" containerID="7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.756579 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3"} err="failed to get container status \"7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3\": rpc error: code = NotFound desc = could not find container \"7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3\": container with ID starting with 7e775d7f63b988c0f9f4bd825ede6aa006002c3f96bfffe8a7981d968c649ce3 not found: ID does not exist" Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.756599 4871 scope.go:117] "RemoveContainer" containerID="5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367" Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.756935 
4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367\": container with ID starting with 5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367 not found: ID does not exist" containerID="5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.756957 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367"} err="failed to get container status \"5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367\": rpc error: code = NotFound desc = could not find container \"5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367\": container with ID starting with 5b80a7737d420bade11415d09240f2aeeac3fb011f9eba5a4f25daaf00a46367 not found: ID does not exist"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.756971 4871 scope.go:117] "RemoveContainer" containerID="3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.757203 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7\": container with ID starting with 3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7 not found: ID does not exist" containerID="3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.757229 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7"} err="failed to get container status \"3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7\": rpc error: code = NotFound desc = could not find container \"3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7\": container with ID starting with 3666c1504d2a15b696f3425c3ca8c4dc6b00b50ba54bf8ca2bb81945b32241a7 not found: ID does not exist"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.757244 4871 scope.go:117] "RemoveContainer" containerID="d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.757505 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2\": container with ID starting with d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2 not found: ID does not exist" containerID="d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.757571 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2"} err="failed to get container status \"d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2\": rpc error: code = NotFound desc = could not find container \"d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2\": container with ID starting with d7ac3c5b17165b94b45df75a484e8a4884eae0972693cf02e24b2da9617517f2 not found: ID does not exist"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.931744 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.941485 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.964384 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.964856 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32032cf4-30d8-45c4-a12e-f1e79eda1c52" containerName="mariadb-account-create-update"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.964878 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="32032cf4-30d8-45c4-a12e-f1e79eda1c52" containerName="mariadb-account-create-update"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.964894 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="proxy-httpd"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.964901 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="proxy-httpd"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.964915 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2" containerName="mariadb-account-create-update"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.964924 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2" containerName="mariadb-account-create-update"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.964933 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc" containerName="mariadb-database-create"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.964940 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc" containerName="mariadb-database-create"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.964959 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon-log"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.964967 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon-log"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.964984 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50e575b9-71c6-466e-993f-12c04b2834db" containerName="mariadb-database-create"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.964992 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="50e575b9-71c6-466e-993f-12c04b2834db" containerName="mariadb-database-create"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.965001 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="sg-core"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965009 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="sg-core"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.965027 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="ceilometer-central-agent"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965035 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="ceilometer-central-agent"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.965046 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b91e09cf-5ae8-4288-a212-d52274d5ef05" containerName="mariadb-account-create-update"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965055 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b91e09cf-5ae8-4288-a212-d52274d5ef05" containerName="mariadb-account-create-update"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.965069 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965076 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.965102 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e161c874-eca9-4f95-8419-660b27e5d21e" containerName="mariadb-database-create"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965110 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="e161c874-eca9-4f95-8419-660b27e5d21e" containerName="mariadb-database-create"
Nov 26 05:45:22 crc kubenswrapper[4871]: E1126 05:45:22.965128 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="ceilometer-notification-agent"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965135 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="ceilometer-notification-agent"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965334 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon-log"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965355 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc" containerName="mariadb-database-create"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965365 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="32032cf4-30d8-45c4-a12e-f1e79eda1c52" containerName="mariadb-account-create-update"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965379 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="812fa0f1-c216-4db1-b3e6-cfa862b8cb93" containerName="horizon"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965392 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="50e575b9-71c6-466e-993f-12c04b2834db" containerName="mariadb-database-create"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965401 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2" containerName="mariadb-account-create-update"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965412 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="ceilometer-notification-agent"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965419 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="b91e09cf-5ae8-4288-a212-d52274d5ef05" containerName="mariadb-account-create-update"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965427 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="ceilometer-central-agent"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965440 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="proxy-httpd"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965454 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" containerName="sg-core"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.965467 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="e161c874-eca9-4f95-8419-660b27e5d21e" containerName="mariadb-database-create"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.967555 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.970778 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.970985 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 26 05:45:22 crc kubenswrapper[4871]: I1126 05:45:22.976713 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.051324 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-run-httpd\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.051393 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gbqc\" (UniqueName: \"kubernetes.io/projected/cc93269b-50ab-42ec-b316-00393b3d7751-kube-api-access-4gbqc\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.051498 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-config-data\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.051583 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-scripts\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.051657 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-log-httpd\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.051849 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.051889 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.154381 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-run-httpd\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.154453 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gbqc\" (UniqueName: \"kubernetes.io/projected/cc93269b-50ab-42ec-b316-00393b3d7751-kube-api-access-4gbqc\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.154487 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-config-data\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.154510 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-scripts\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.155113 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-run-httpd\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.159814 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-log-httpd\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.160036 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.160082 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.161077 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-log-httpd\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.164075 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-config-data\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.169099 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.169274 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-scripts\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.175088 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.179088 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gbqc\" (UniqueName: \"kubernetes.io/projected/cc93269b-50ab-42ec-b316-00393b3d7751-kube-api-access-4gbqc\") pod \"ceilometer-0\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") " pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.285788 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:23 crc kubenswrapper[4871]: I1126 05:45:23.734905 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:23 crc kubenswrapper[4871]: W1126 05:45:23.738538 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc93269b_50ab_42ec_b316_00393b3d7751.slice/crio-95b7458c576b2555ced8543c79adf7e5a1f496f3a8eab0e34df8cbe370c7dec6 WatchSource:0}: Error finding container 95b7458c576b2555ced8543c79adf7e5a1f496f3a8eab0e34df8cbe370c7dec6: Status 404 returned error can't find the container with id 95b7458c576b2555ced8543c79adf7e5a1f496f3a8eab0e34df8cbe370c7dec6
Nov 26 05:45:24 crc kubenswrapper[4871]: I1126 05:45:24.508121 4871 scope.go:117] "RemoveContainer" containerID="c0dee9dbf5b0d8070947de7e352e4d9d380476356ecdfa075feffa0ed0cfbdac"
Nov 26 05:45:24 crc kubenswrapper[4871]: I1126 05:45:24.522585 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5405fb24-87e0-4f32-ac6f-0f59338056ae" path="/var/lib/kubelet/pods/5405fb24-87e0-4f32-ac6f-0f59338056ae/volumes"
Nov 26 05:45:24 crc kubenswrapper[4871]: I1126 05:45:24.604652 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc93269b-50ab-42ec-b316-00393b3d7751","Type":"ContainerStarted","Data":"5f0b2b9d5c4271014fe70b57c1328de4e93437bd862e6aef85401f4a8fbb9102"}
Nov 26 05:45:24 crc kubenswrapper[4871]: I1126 05:45:24.604914 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc93269b-50ab-42ec-b316-00393b3d7751","Type":"ContainerStarted","Data":"09724c258dbd4fdee6bf3c02e2df545d1dab0b3e6e40cccfef054365c4cf858f"}
Nov 26 05:45:24 crc kubenswrapper[4871]: I1126 05:45:24.604928 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc93269b-50ab-42ec-b316-00393b3d7751","Type":"ContainerStarted","Data":"95b7458c576b2555ced8543c79adf7e5a1f496f3a8eab0e34df8cbe370c7dec6"}
Nov 26 05:45:25 crc kubenswrapper[4871]: I1126 05:45:25.616245 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc93269b-50ab-42ec-b316-00393b3d7751","Type":"ContainerStarted","Data":"f905198e62a59f32cce4e027ce3975243ab73e386c1a91bc0e65169f4d4e6f43"}
Nov 26 05:45:25 crc kubenswrapper[4871]: I1126 05:45:25.618257 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"906807e1-f724-4ab4-9ccc-95656188890e","Type":"ContainerStarted","Data":"cad0798f2898de3a0b31bc0d4cd4c5c6132d0208856588e89b33940a53278e1d"}
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.221814 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.514391 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-tfgn7"]
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.516031 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.518933 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.519355 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.519728 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-5dkq4"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.529535 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-tfgn7"]
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.543541 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df6l6\" (UniqueName: \"kubernetes.io/projected/6393db54-1c1f-47bc-8669-e56ed280db54-kube-api-access-df6l6\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.543599 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-scripts\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.543627 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.543918 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-config-data\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.642760 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc93269b-50ab-42ec-b316-00393b3d7751","Type":"ContainerStarted","Data":"9fc8d2dd9a0b2c8181dbb18519b90acfa2304aa655cb3b8d656960f494713cb1"}
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.643959 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.645936 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-df6l6\" (UniqueName: \"kubernetes.io/projected/6393db54-1c1f-47bc-8669-e56ed280db54-kube-api-access-df6l6\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.645977 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-scripts\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.645996 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.646103 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-config-data\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.652260 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-config-data\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.652621 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-scripts\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.656344 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.670161 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-df6l6\" (UniqueName: \"kubernetes.io/projected/6393db54-1c1f-47bc-8669-e56ed280db54-kube-api-access-df6l6\") pod \"nova-cell0-conductor-db-sync-tfgn7\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") " pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.678254 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.038339206 podStartE2EDuration="5.678229585s" podCreationTimestamp="2025-11-26 05:45:22 +0000 UTC" firstStartedPulling="2025-11-26 05:45:23.740765399 +0000 UTC m=+1181.923816975" lastFinishedPulling="2025-11-26 05:45:26.380655768 +0000 UTC m=+1184.563707354" observedRunningTime="2025-11-26 05:45:27.669197213 +0000 UTC m=+1185.852248799" watchObservedRunningTime="2025-11-26 05:45:27.678229585 +0000 UTC m=+1185.861281171"
Nov 26 05:45:27 crc kubenswrapper[4871]: I1126 05:45:27.841272 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:28 crc kubenswrapper[4871]: W1126 05:45:28.361858 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6393db54_1c1f_47bc_8669_e56ed280db54.slice/crio-ed297b4a4fc1daac5da5464ce57b3bf0a0c1203f0e53df727ca377e627e39bef WatchSource:0}: Error finding container ed297b4a4fc1daac5da5464ce57b3bf0a0c1203f0e53df727ca377e627e39bef: Status 404 returned error can't find the container with id ed297b4a4fc1daac5da5464ce57b3bf0a0c1203f0e53df727ca377e627e39bef
Nov 26 05:45:28 crc kubenswrapper[4871]: I1126 05:45:28.363989 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-tfgn7"]
Nov 26 05:45:28 crc kubenswrapper[4871]: I1126 05:45:28.364326 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 05:45:28 crc kubenswrapper[4871]: I1126 05:45:28.662979 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-tfgn7" event={"ID":"6393db54-1c1f-47bc-8669-e56ed280db54","Type":"ContainerStarted","Data":"ed297b4a4fc1daac5da5464ce57b3bf0a0c1203f0e53df727ca377e627e39bef"}
Nov 26 05:45:28 crc kubenswrapper[4871]: I1126 05:45:28.663182 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="ceilometer-central-agent" containerID="cri-o://09724c258dbd4fdee6bf3c02e2df545d1dab0b3e6e40cccfef054365c4cf858f" gracePeriod=30
Nov 26 05:45:28 crc kubenswrapper[4871]: I1126 05:45:28.663284 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="sg-core" containerID="cri-o://f905198e62a59f32cce4e027ce3975243ab73e386c1a91bc0e65169f4d4e6f43" gracePeriod=30
Nov 26 05:45:28 crc kubenswrapper[4871]: I1126 05:45:28.663320 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="proxy-httpd" containerID="cri-o://9fc8d2dd9a0b2c8181dbb18519b90acfa2304aa655cb3b8d656960f494713cb1" gracePeriod=30
Nov 26 05:45:28 crc kubenswrapper[4871]: I1126 05:45:28.663326 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="ceilometer-notification-agent" containerID="cri-o://5f0b2b9d5c4271014fe70b57c1328de4e93437bd862e6aef85401f4a8fbb9102" gracePeriod=30
Nov 26 05:45:29 crc kubenswrapper[4871]: I1126 05:45:29.677937 4871 generic.go:334] "Generic (PLEG): container finished" podID="cc93269b-50ab-42ec-b316-00393b3d7751" containerID="9fc8d2dd9a0b2c8181dbb18519b90acfa2304aa655cb3b8d656960f494713cb1" exitCode=0
Nov 26 05:45:29 crc kubenswrapper[4871]: I1126 05:45:29.678171 4871 generic.go:334] "Generic (PLEG): container finished" podID="cc93269b-50ab-42ec-b316-00393b3d7751" containerID="f905198e62a59f32cce4e027ce3975243ab73e386c1a91bc0e65169f4d4e6f43" exitCode=2
Nov 26 05:45:29 crc kubenswrapper[4871]: I1126 05:45:29.678182 4871 generic.go:334] "Generic (PLEG): container finished" podID="cc93269b-50ab-42ec-b316-00393b3d7751" containerID="5f0b2b9d5c4271014fe70b57c1328de4e93437bd862e6aef85401f4a8fbb9102" exitCode=0
Nov 26 05:45:29 crc kubenswrapper[4871]: I1126 05:45:29.678017 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc93269b-50ab-42ec-b316-00393b3d7751","Type":"ContainerDied","Data":"9fc8d2dd9a0b2c8181dbb18519b90acfa2304aa655cb3b8d656960f494713cb1"}
Nov 26 05:45:29 crc kubenswrapper[4871]: I1126 05:45:29.678220 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc93269b-50ab-42ec-b316-00393b3d7751","Type":"ContainerDied","Data":"f905198e62a59f32cce4e027ce3975243ab73e386c1a91bc0e65169f4d4e6f43"}
Nov 26 05:45:29 crc kubenswrapper[4871]: I1126 05:45:29.678234 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc93269b-50ab-42ec-b316-00393b3d7751","Type":"ContainerDied","Data":"5f0b2b9d5c4271014fe70b57c1328de4e93437bd862e6aef85401f4a8fbb9102"}
Nov 26 05:45:34 crc kubenswrapper[4871]: I1126 05:45:34.082110 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 05:45:34 crc kubenswrapper[4871]: I1126 05:45:34.082854 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e27715b3-349a-4da9-806b-bac09bc34086" containerName="glance-log" containerID="cri-o://83be37e4ac8fbf2ead804e3b93191a3c997ef0f5ccc9646ddd8581467b4a51ea" gracePeriod=30
Nov 26 05:45:34 crc kubenswrapper[4871]: I1126 05:45:34.083101 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="e27715b3-349a-4da9-806b-bac09bc34086" containerName="glance-httpd" containerID="cri-o://05deadaf9cf068234e04038e52495b9c032ea35da37d52e6e0f831d3200902bf" gracePeriod=30
Nov 26 05:45:34 crc kubenswrapper[4871]: I1126 05:45:34.152341 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:34 crc kubenswrapper[4871]: I1126 05:45:34.200155 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:34 crc kubenswrapper[4871]: I1126 05:45:34.741150 4871 generic.go:334] "Generic (PLEG): container finished" podID="e27715b3-349a-4da9-806b-bac09bc34086" containerID="83be37e4ac8fbf2ead804e3b93191a3c997ef0f5ccc9646ddd8581467b4a51ea" exitCode=143
Nov 26 05:45:34 crc kubenswrapper[4871]: I1126 05:45:34.741255 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e27715b3-349a-4da9-806b-bac09bc34086","Type":"ContainerDied","Data":"83be37e4ac8fbf2ead804e3b93191a3c997ef0f5ccc9646ddd8581467b4a51ea"}
Nov 26 05:45:34 crc kubenswrapper[4871]: I1126 05:45:34.741317 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:34 crc kubenswrapper[4871]: I1126 05:45:34.777866 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:34 crc kubenswrapper[4871]: I1126 05:45:34.812366 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"]
Nov 26 05:45:35 crc kubenswrapper[4871]: I1126 05:45:35.209979 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 05:45:35 crc kubenswrapper[4871]: I1126 05:45:35.211856 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3c03afa4-2257-4e38-b59a-04cdcc8060e4" containerName="glance-httpd" containerID="cri-o://0ecb79534560d8f2f10ebe835141ff761444eaff9dc3f528737ed699b8494ed2" gracePeriod=30
Nov 26 05:45:35 crc kubenswrapper[4871]: I1126 05:45:35.211743 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3c03afa4-2257-4e38-b59a-04cdcc8060e4" containerName="glance-log" containerID="cri-o://a37f7c1dd217788f012692caa6dcf8d8413cd3b61d4cf3cf6c0084a745500919" gracePeriod=30
Nov 26 05:45:35 crc kubenswrapper[4871]: I1126 05:45:35.756953 4871 generic.go:334] "Generic (PLEG): container finished" podID="e27715b3-349a-4da9-806b-bac09bc34086" containerID="05deadaf9cf068234e04038e52495b9c032ea35da37d52e6e0f831d3200902bf" exitCode=0
Nov 26 05:45:35 crc kubenswrapper[4871]: I1126 05:45:35.757058 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e27715b3-349a-4da9-806b-bac09bc34086","Type":"ContainerDied","Data":"05deadaf9cf068234e04038e52495b9c032ea35da37d52e6e0f831d3200902bf"}
Nov 26 05:45:35 crc kubenswrapper[4871]: I1126 05:45:35.759892 4871 generic.go:334] "Generic (PLEG): container finished" podID="3c03afa4-2257-4e38-b59a-04cdcc8060e4" containerID="a37f7c1dd217788f012692caa6dcf8d8413cd3b61d4cf3cf6c0084a745500919" exitCode=143
Nov 26 05:45:35 crc kubenswrapper[4871]: I1126 05:45:35.759976 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3c03afa4-2257-4e38-b59a-04cdcc8060e4","Type":"ContainerDied","Data":"a37f7c1dd217788f012692caa6dcf8d8413cd3b61d4cf3cf6c0084a745500919"}
Nov 26 05:45:36 crc kubenswrapper[4871]: I1126 05:45:36.774607 4871 generic.go:334] "Generic (PLEG): container finished" podID="cc93269b-50ab-42ec-b316-00393b3d7751" containerID="09724c258dbd4fdee6bf3c02e2df545d1dab0b3e6e40cccfef054365c4cf858f" exitCode=0
Nov 26 05:45:36 crc kubenswrapper[4871]: I1126 05:45:36.774659 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc93269b-50ab-42ec-b316-00393b3d7751","Type":"ContainerDied","Data":"09724c258dbd4fdee6bf3c02e2df545d1dab0b3e6e40cccfef054365c4cf858f"}
Nov 26 05:45:36 crc kubenswrapper[4871]: I1126 05:45:36.777076 4871 generic.go:334] "Generic (PLEG): container finished" podID="3c03afa4-2257-4e38-b59a-04cdcc8060e4" containerID="0ecb79534560d8f2f10ebe835141ff761444eaff9dc3f528737ed699b8494ed2" exitCode=0
Nov 26 05:45:36 crc kubenswrapper[4871]: I1126 05:45:36.777127 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3c03afa4-2257-4e38-b59a-04cdcc8060e4","Type":"ContainerDied","Data":"0ecb79534560d8f2f10ebe835141ff761444eaff9dc3f528737ed699b8494ed2"}
Nov 26 05:45:36 crc kubenswrapper[4871]: I1126 05:45:36.777269 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" containerID="cri-o://cad0798f2898de3a0b31bc0d4cd4c5c6132d0208856588e89b33940a53278e1d" gracePeriod=30
Nov 26 05:45:38 crc kubenswrapper[4871]: I1126 05:45:38.799389 4871 generic.go:334] "Generic (PLEG): container finished" podID="906807e1-f724-4ab4-9ccc-95656188890e" containerID="cad0798f2898de3a0b31bc0d4cd4c5c6132d0208856588e89b33940a53278e1d" exitCode=0
Nov 26 05:45:38 crc kubenswrapper[4871]: I1126 05:45:38.799452 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"906807e1-f724-4ab4-9ccc-95656188890e","Type":"ContainerDied","Data":"cad0798f2898de3a0b31bc0d4cd4c5c6132d0208856588e89b33940a53278e1d"}
Nov 26 05:45:38 crc kubenswrapper[4871]: I1126 05:45:38.799700 4871 scope.go:117] "RemoveContainer" containerID="c0dee9dbf5b0d8070947de7e352e4d9d380476356ecdfa075feffa0ed0cfbdac"
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.819326 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"906807e1-f724-4ab4-9ccc-95656188890e","Type":"ContainerDied","Data":"465e559dca887fb0305a03f78054201387518d7c17dc6ea0b8151956c786aba2"}
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.820406 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="465e559dca887fb0305a03f78054201387518d7c17dc6ea0b8151956c786aba2"
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.824099 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3c03afa4-2257-4e38-b59a-04cdcc8060e4","Type":"ContainerDied","Data":"eaa69e56dc3d992bdc00253a48e26aebda54c4128e6e522089cb7af3e4549700"}
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.824143 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eaa69e56dc3d992bdc00253a48e26aebda54c4128e6e522089cb7af3e4549700"
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.828311 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cc93269b-50ab-42ec-b316-00393b3d7751","Type":"ContainerDied","Data":"95b7458c576b2555ced8543c79adf7e5a1f496f3a8eab0e34df8cbe370c7dec6"}
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.828351 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95b7458c576b2555ced8543c79adf7e5a1f496f3a8eab0e34df8cbe370c7dec6"
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.828357 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.835753 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.842466 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.974228 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/906807e1-f724-4ab4-9ccc-95656188890e-logs\") pod \"906807e1-f724-4ab4-9ccc-95656188890e\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.974296 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-combined-ca-bundle\") pod \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.974363 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-custom-prometheus-ca\") pod \"906807e1-f724-4ab4-9ccc-95656188890e\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.974403 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-sg-core-conf-yaml\") pod \"cc93269b-50ab-42ec-b316-00393b3d7751\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.974432 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-combined-ca-bundle\") pod \"906807e1-f724-4ab4-9ccc-95656188890e\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.974481 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-combined-ca-bundle\") pod \"cc93269b-50ab-42ec-b316-00393b3d7751\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.974512 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-scripts\") pod \"cc93269b-50ab-42ec-b316-00393b3d7751\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.974920 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-logs\") pod \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.974944 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.974969 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztspb\" (UniqueName: \"kubernetes.io/projected/3c03afa4-2257-4e38-b59a-04cdcc8060e4-kube-api-access-ztspb\") pod \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.974987 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-log-httpd\") pod \"cc93269b-50ab-42ec-b316-00393b3d7751\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.975009 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-run-httpd\") pod \"cc93269b-50ab-42ec-b316-00393b3d7751\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.975042 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gbqc\" (UniqueName: \"kubernetes.io/projected/cc93269b-50ab-42ec-b316-00393b3d7751-kube-api-access-4gbqc\") pod \"cc93269b-50ab-42ec-b316-00393b3d7751\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.975057 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-config-data\") pod \"906807e1-f724-4ab4-9ccc-95656188890e\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.975074 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-scripts\") pod \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.975123 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-internal-tls-certs\") pod \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.975155 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gz64\" (UniqueName: \"kubernetes.io/projected/906807e1-f724-4ab4-9ccc-95656188890e-kube-api-access-4gz64\") pod \"906807e1-f724-4ab4-9ccc-95656188890e\" (UID: \"906807e1-f724-4ab4-9ccc-95656188890e\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.975172 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-config-data\") pod \"cc93269b-50ab-42ec-b316-00393b3d7751\" (UID: \"cc93269b-50ab-42ec-b316-00393b3d7751\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.975211 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-config-data\") pod \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.975252 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-httpd-run\") pod \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\" (UID: \"3c03afa4-2257-4e38-b59a-04cdcc8060e4\") "
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.975728 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/906807e1-f724-4ab4-9ccc-95656188890e-logs" (OuterVolumeSpecName: "logs") pod "906807e1-f724-4ab4-9ccc-95656188890e" (UID: "906807e1-f724-4ab4-9ccc-95656188890e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.976070 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3c03afa4-2257-4e38-b59a-04cdcc8060e4" (UID: "3c03afa4-2257-4e38-b59a-04cdcc8060e4"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.976943 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "cc93269b-50ab-42ec-b316-00393b3d7751" (UID: "cc93269b-50ab-42ec-b316-00393b3d7751"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.980801 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-scripts" (OuterVolumeSpecName: "scripts") pod "3c03afa4-2257-4e38-b59a-04cdcc8060e4" (UID: "3c03afa4-2257-4e38-b59a-04cdcc8060e4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.981069 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "cc93269b-50ab-42ec-b316-00393b3d7751" (UID: "cc93269b-50ab-42ec-b316-00393b3d7751"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.985177 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-scripts" (OuterVolumeSpecName: "scripts") pod "cc93269b-50ab-42ec-b316-00393b3d7751" (UID: "cc93269b-50ab-42ec-b316-00393b3d7751"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.988751 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/906807e1-f724-4ab4-9ccc-95656188890e-kube-api-access-4gz64" (OuterVolumeSpecName: "kube-api-access-4gz64") pod "906807e1-f724-4ab4-9ccc-95656188890e" (UID: "906807e1-f724-4ab4-9ccc-95656188890e"). InnerVolumeSpecName "kube-api-access-4gz64". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.992617 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc93269b-50ab-42ec-b316-00393b3d7751-kube-api-access-4gbqc" (OuterVolumeSpecName: "kube-api-access-4gbqc") pod "cc93269b-50ab-42ec-b316-00393b3d7751" (UID: "cc93269b-50ab-42ec-b316-00393b3d7751"). InnerVolumeSpecName "kube-api-access-4gbqc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.992737 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "3c03afa4-2257-4e38-b59a-04cdcc8060e4" (UID: "3c03afa4-2257-4e38-b59a-04cdcc8060e4"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 05:45:39 crc kubenswrapper[4871]: I1126 05:45:39.993376 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-logs" (OuterVolumeSpecName: "logs") pod "3c03afa4-2257-4e38-b59a-04cdcc8060e4" (UID: "3c03afa4-2257-4e38-b59a-04cdcc8060e4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.007318 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c03afa4-2257-4e38-b59a-04cdcc8060e4-kube-api-access-ztspb" (OuterVolumeSpecName: "kube-api-access-ztspb") pod "3c03afa4-2257-4e38-b59a-04cdcc8060e4" (UID: "3c03afa4-2257-4e38-b59a-04cdcc8060e4"). InnerVolumeSpecName "kube-api-access-ztspb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.027588 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "906807e1-f724-4ab4-9ccc-95656188890e" (UID: "906807e1-f724-4ab4-9ccc-95656188890e"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.034027 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "906807e1-f724-4ab4-9ccc-95656188890e" (UID: "906807e1-f724-4ab4-9ccc-95656188890e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.036992 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c03afa4-2257-4e38-b59a-04cdcc8060e4" (UID: "3c03afa4-2257-4e38-b59a-04cdcc8060e4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.069765 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "cc93269b-50ab-42ec-b316-00393b3d7751" (UID: "cc93269b-50ab-42ec-b316-00393b3d7751"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077119 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4gz64\" (UniqueName: \"kubernetes.io/projected/906807e1-f724-4ab4-9ccc-95656188890e-kube-api-access-4gz64\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077159 4871 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077195 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/906807e1-f724-4ab4-9ccc-95656188890e-logs\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077208 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077219 4871 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-custom-prometheus-ca\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077230 4871 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077240 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077272 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077285 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3c03afa4-2257-4e38-b59a-04cdcc8060e4-logs\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077459 4871 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" "
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077479 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztspb\" (UniqueName: \"kubernetes.io/projected/3c03afa4-2257-4e38-b59a-04cdcc8060e4-kube-api-access-ztspb\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077491 4871 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077501 4871 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cc93269b-50ab-42ec-b316-00393b3d7751-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077512 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4gbqc\" (UniqueName: \"kubernetes.io/projected/cc93269b-50ab-42ec-b316-00393b3d7751-kube-api-access-4gbqc\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.077590 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.090621 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3c03afa4-2257-4e38-b59a-04cdcc8060e4" (UID: "3c03afa4-2257-4e38-b59a-04cdcc8060e4"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.094813 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-config-data" (OuterVolumeSpecName: "config-data") pod "906807e1-f724-4ab4-9ccc-95656188890e" (UID: "906807e1-f724-4ab4-9ccc-95656188890e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.101050 4871 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc"
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.110496 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-config-data" (OuterVolumeSpecName: "config-data") pod "3c03afa4-2257-4e38-b59a-04cdcc8060e4" (UID: "3c03afa4-2257-4e38-b59a-04cdcc8060e4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.132324 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc93269b-50ab-42ec-b316-00393b3d7751" (UID: "cc93269b-50ab-42ec-b316-00393b3d7751"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.137007 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-config-data" (OuterVolumeSpecName: "config-data") pod "cc93269b-50ab-42ec-b316-00393b3d7751" (UID: "cc93269b-50ab-42ec-b316-00393b3d7751"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.179280 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.179547 4871 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.179563 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/906807e1-f724-4ab4-9ccc-95656188890e-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.179574 4871 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.179585 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc93269b-50ab-42ec-b316-00393b3d7751-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.179595 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c03afa4-2257-4e38-b59a-04cdcc8060e4-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.716259 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.840282 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e27715b3-349a-4da9-806b-bac09bc34086","Type":"ContainerDied","Data":"04cef573aea9df6791ee2358661bf6148dcf256931f96704ad03c6814bd62332"}
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.841408 4871 scope.go:117] "RemoveContainer" containerID="05deadaf9cf068234e04038e52495b9c032ea35da37d52e6e0f831d3200902bf"
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.840534 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.842277 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-tfgn7" event={"ID":"6393db54-1c1f-47bc-8669-e56ed280db54","Type":"ContainerStarted","Data":"0b3f41dc451b2d5092df4530c819384ade2cd6e8bcc1cc5cc6ed1ee49c17ce9e"}
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.845980 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.846007 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.846013 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.865685 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-tfgn7" podStartSLOduration=2.428479091 podStartE2EDuration="13.865666048s" podCreationTimestamp="2025-11-26 05:45:27 +0000 UTC" firstStartedPulling="2025-11-26 05:45:28.364124243 +0000 UTC m=+1186.547175829" lastFinishedPulling="2025-11-26 05:45:39.80131119 +0000 UTC m=+1197.984362786" observedRunningTime="2025-11-26 05:45:40.857870566 +0000 UTC m=+1199.040922152" watchObservedRunningTime="2025-11-26 05:45:40.865666048 +0000 UTC m=+1199.048717634"
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.877577 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.881286 4871 scope.go:117] "RemoveContainer" containerID="83be37e4ac8fbf2ead804e3b93191a3c997ef0f5ccc9646ddd8581467b4a51ea"
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.887757 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.892863 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-combined-ca-bundle\") pod \"e27715b3-349a-4da9-806b-bac09bc34086\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") "
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.893564 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"e27715b3-349a-4da9-806b-bac09bc34086\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") "
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.893714 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-logs\") pod \"e27715b3-349a-4da9-806b-bac09bc34086\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") "
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.893845 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-httpd-run\") pod \"e27715b3-349a-4da9-806b-bac09bc34086\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") "
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.893990 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-config-data\") pod \"e27715b3-349a-4da9-806b-bac09bc34086\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") "
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.894157 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6w2mt\" (UniqueName: \"kubernetes.io/projected/e27715b3-349a-4da9-806b-bac09bc34086-kube-api-access-6w2mt\") pod \"e27715b3-349a-4da9-806b-bac09bc34086\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") "
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.894270 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-scripts\") pod \"e27715b3-349a-4da9-806b-bac09bc34086\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") "
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.894400 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-public-tls-certs\") pod \"e27715b3-349a-4da9-806b-bac09bc34086\" (UID: \"e27715b3-349a-4da9-806b-bac09bc34086\") "
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.894696 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e27715b3-349a-4da9-806b-bac09bc34086" (UID: "e27715b3-349a-4da9-806b-bac09bc34086"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.895094 4871 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.901631 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.904788 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-logs" (OuterVolumeSpecName: "logs") pod "e27715b3-349a-4da9-806b-bac09bc34086" (UID: "e27715b3-349a-4da9-806b-bac09bc34086"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.914540 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "e27715b3-349a-4da9-806b-bac09bc34086" (UID: "e27715b3-349a-4da9-806b-bac09bc34086"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.918716 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e27715b3-349a-4da9-806b-bac09bc34086-kube-api-access-6w2mt" (OuterVolumeSpecName: "kube-api-access-6w2mt") pod "e27715b3-349a-4da9-806b-bac09bc34086" (UID: "e27715b3-349a-4da9-806b-bac09bc34086"). InnerVolumeSpecName "kube-api-access-6w2mt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.936662 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.950653 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-scripts" (OuterVolumeSpecName: "scripts") pod "e27715b3-349a-4da9-806b-bac09bc34086" (UID: "e27715b3-349a-4da9-806b-bac09bc34086"). InnerVolumeSpecName "scripts".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.983605 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.992459 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.992889 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c03afa4-2257-4e38-b59a-04cdcc8060e4" containerName="glance-log" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.992913 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c03afa4-2257-4e38-b59a-04cdcc8060e4" containerName="glance-log" Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.992928 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e27715b3-349a-4da9-806b-bac09bc34086" containerName="glance-log" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.992934 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="e27715b3-349a-4da9-806b-bac09bc34086" containerName="glance-log" Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.992940 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e27715b3-349a-4da9-806b-bac09bc34086" containerName="glance-httpd" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.992946 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="e27715b3-349a-4da9-806b-bac09bc34086" containerName="glance-httpd" Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.992955 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="ceilometer-central-agent" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.992961 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="ceilometer-central-agent" Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.992973 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.992978 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.992997 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="ceilometer-notification-agent" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993004 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="ceilometer-notification-agent" Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.993015 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="proxy-httpd" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993021 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="proxy-httpd" Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.993034 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993041 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="906807e1-f724-4ab4-9ccc-95656188890e" 
containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.993052 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993057 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.993070 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993076 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.993086 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="sg-core" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993093 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="sg-core" Nov 26 05:45:40 crc kubenswrapper[4871]: E1126 05:45:40.993102 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c03afa4-2257-4e38-b59a-04cdcc8060e4" containerName="glance-httpd" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993107 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c03afa4-2257-4e38-b59a-04cdcc8060e4" containerName="glance-httpd" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993258 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="ceilometer-notification-agent" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993271 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="sg-core" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993281 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c03afa4-2257-4e38-b59a-04cdcc8060e4" containerName="glance-httpd" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993289 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="proxy-httpd" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993300 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993306 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="e27715b3-349a-4da9-806b-bac09bc34086" containerName="glance-httpd" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993318 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c03afa4-2257-4e38-b59a-04cdcc8060e4" containerName="glance-log" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993330 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="e27715b3-349a-4da9-806b-bac09bc34086" containerName="glance-log" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993341 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993352 4871 
memory_manager.go:354] "RemoveStaleState removing state" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" containerName="ceilometer-central-agent" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993359 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.993761 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="906807e1-f724-4ab4-9ccc-95656188890e" containerName="watcher-decision-engine" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.995077 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.998172 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6w2mt\" (UniqueName: \"kubernetes.io/projected/e27715b3-349a-4da9-806b-bac09bc34086-kube-api-access-6w2mt\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.998204 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.998229 4871 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 26 05:45:40 crc kubenswrapper[4871]: I1126 05:45:40.998241 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e27715b3-349a-4da9-806b-bac09bc34086-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.000749 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.001126 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.005418 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.017370 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.019593 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.021399 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.022675 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e27715b3-349a-4da9-806b-bac09bc34086" (UID: "e27715b3-349a-4da9-806b-bac09bc34086"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.036222 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-config-data" (OuterVolumeSpecName: "config-data") pod "e27715b3-349a-4da9-806b-bac09bc34086" (UID: "e27715b3-349a-4da9-806b-bac09bc34086"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.036985 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.041698 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "e27715b3-349a-4da9-806b-bac09bc34086" (UID: "e27715b3-349a-4da9-806b-bac09bc34086"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.042087 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.043134 4871 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.051293 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.053293 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.055367 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.068574 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.081933 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.100167 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-run-httpd\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.100316 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-scripts\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.100373 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-log-httpd\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.100399 4871 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-472ck\" (UniqueName: \"kubernetes.io/projected/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-kube-api-access-472ck\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.100429 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.100478 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.100508 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-config-data\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.100633 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.100651 4871 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.100663 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.100674 4871 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e27715b3-349a-4da9-806b-bac09bc34086-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.175919 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.184960 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.202330 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.202401 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-config-data\") pod \"watcher-decision-engine-0\" (UID: 
\"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.202550 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-run-httpd\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.202602 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.202647 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.202715 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.202875 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a199844c-c13e-47ce-8980-b3292e3435b3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203025 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-scripts\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203076 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-run-httpd\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203077 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-log-httpd\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203132 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-472ck\" (UniqueName: \"kubernetes.io/projected/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-kube-api-access-472ck\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203164 4871 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203180 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfv9s\" (UniqueName: \"kubernetes.io/projected/a199844c-c13e-47ce-8980-b3292e3435b3-kube-api-access-zfv9s\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203201 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203219 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203235 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-logs\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203258 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203279 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sn2j\" (UniqueName: \"kubernetes.io/projected/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-kube-api-access-2sn2j\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203298 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a199844c-c13e-47ce-8980-b3292e3435b3-logs\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203318 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203337 4871 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-config-data\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.203476 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-log-httpd\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.206630 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.207956 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.209461 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.210873 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.212779 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-scripts\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.216634 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-config-data\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.221119 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.221350 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.224086 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-472ck\" (UniqueName: \"kubernetes.io/projected/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-kube-api-access-472ck\") pod \"ceilometer-0\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") " pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.227552 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.305744 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " 
pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.305800 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-config-data\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.305864 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/206a6ff7-c300-42b8-9816-a272aacc0d94-logs\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.305894 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.305918 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.305958 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.305994 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a199844c-c13e-47ce-8980-b3292e3435b3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306043 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306076 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-config-data\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306098 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trrrm\" (UniqueName: \"kubernetes.io/projected/206a6ff7-c300-42b8-9816-a272aacc0d94-kube-api-access-trrrm\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " 
pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306125 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/206a6ff7-c300-42b8-9816-a272aacc0d94-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306147 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306167 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfv9s\" (UniqueName: \"kubernetes.io/projected/a199844c-c13e-47ce-8980-b3292e3435b3-kube-api-access-zfv9s\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306189 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-logs\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306210 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306229 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306248 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-scripts\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306269 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306290 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " 
pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306313 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sn2j\" (UniqueName: \"kubernetes.io/projected/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-kube-api-access-2sn2j\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.306336 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a199844c-c13e-47ce-8980-b3292e3435b3-logs\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.307029 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a199844c-c13e-47ce-8980-b3292e3435b3-logs\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.308309 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a199844c-c13e-47ce-8980-b3292e3435b3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.308748 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.310218 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-logs\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.311224 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-config-data\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.313635 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.314220 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.314232 4871 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.316662 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.322287 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a199844c-c13e-47ce-8980-b3292e3435b3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.324075 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.327293 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2sn2j\" (UniqueName: \"kubernetes.io/projected/de5b1e93-a28e-405b-8ab4-a1bc50922b2e-kube-api-access-2sn2j\") pod \"watcher-decision-engine-0\" (UID: \"de5b1e93-a28e-405b-8ab4-a1bc50922b2e\") " pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.328950 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfv9s\" (UniqueName: \"kubernetes.io/projected/a199844c-c13e-47ce-8980-b3292e3435b3-kube-api-access-zfv9s\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.350876 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"a199844c-c13e-47ce-8980-b3292e3435b3\") " pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.363657 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.379322 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.390078 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.407892 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/206a6ff7-c300-42b8-9816-a272aacc0d94-logs\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.407976 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.408008 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-config-data\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.408026 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trrrm\" (UniqueName: \"kubernetes.io/projected/206a6ff7-c300-42b8-9816-a272aacc0d94-kube-api-access-trrrm\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.408046 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/206a6ff7-c300-42b8-9816-a272aacc0d94-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.408070 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.408085 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-scripts\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.408101 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0" Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.408230 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0" Nov 26 
05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.410544 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/206a6ff7-c300-42b8-9816-a272aacc0d94-logs\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0"
Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.411142 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/206a6ff7-c300-42b8-9816-a272aacc0d94-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0"
Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.412466 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0"
Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.434373 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0"
Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.434807 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-config-data\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0"
Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.435033 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/206a6ff7-c300-42b8-9816-a272aacc0d94-scripts\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0"
Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.442982 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trrrm\" (UniqueName: \"kubernetes.io/projected/206a6ff7-c300-42b8-9816-a272aacc0d94-kube-api-access-trrrm\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0"
Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.462299 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"206a6ff7-c300-42b8-9816-a272aacc0d94\") " pod="openstack/glance-default-external-api-0"
Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.735801 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.990909 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 26 05:45:41 crc kubenswrapper[4871]: W1126 05:45:41.997826 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda199844c_c13e_47ce_8980_b3292e3435b3.slice/crio-fa16cb666504084592bc70194b6e9e1ba5b5e923e3ac46b445493d53ae3c92a6 WatchSource:0}: Error finding container fa16cb666504084592bc70194b6e9e1ba5b5e923e3ac46b445493d53ae3c92a6: Status 404 returned error can't find the container with id fa16cb666504084592bc70194b6e9e1ba5b5e923e3ac46b445493d53ae3c92a6
Nov 26 05:45:41 crc kubenswrapper[4871]: I1126 05:45:41.998907 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"]
Nov 26 05:45:42 crc kubenswrapper[4871]: W1126 05:45:42.005127 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda39dcb96_48e9_44da_a1c1_9918ff4ff2a9.slice/crio-d90e439e923bc6c1035d22d60f250d96f20c63e96c3c1f7d11f264964e8ff48e WatchSource:0}: Error finding container d90e439e923bc6c1035d22d60f250d96f20c63e96c3c1f7d11f264964e8ff48e: Status 404 returned error can't find the container with id d90e439e923bc6c1035d22d60f250d96f20c63e96c3c1f7d11f264964e8ff48e
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.009283 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.287072 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 26 05:45:42 crc kubenswrapper[4871]: W1126 05:45:42.312325 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod206a6ff7_c300_42b8_9816_a272aacc0d94.slice/crio-376ebbcf274dcd2863924eeead3eeb6cbda33496d4fc8047f088e875c7b85064 WatchSource:0}: Error finding container 376ebbcf274dcd2863924eeead3eeb6cbda33496d4fc8047f088e875c7b85064: Status 404 returned error can't find the container with id 376ebbcf274dcd2863924eeead3eeb6cbda33496d4fc8047f088e875c7b85064
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.523946 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c03afa4-2257-4e38-b59a-04cdcc8060e4" path="/var/lib/kubelet/pods/3c03afa4-2257-4e38-b59a-04cdcc8060e4/volumes"
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.525031 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="906807e1-f724-4ab4-9ccc-95656188890e" path="/var/lib/kubelet/pods/906807e1-f724-4ab4-9ccc-95656188890e/volumes"
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.525826 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc93269b-50ab-42ec-b316-00393b3d7751" path="/var/lib/kubelet/pods/cc93269b-50ab-42ec-b316-00393b3d7751/volumes"
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.534450 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e27715b3-349a-4da9-806b-bac09bc34086" path="/var/lib/kubelet/pods/e27715b3-349a-4da9-806b-bac09bc34086/volumes"
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.891744 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a199844c-c13e-47ce-8980-b3292e3435b3","Type":"ContainerStarted","Data":"28e930af26ac620934163a90dc3874079deb57cdc387629933b311151e8ff203"}
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.892816 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a199844c-c13e-47ce-8980-b3292e3435b3","Type":"ContainerStarted","Data":"fa16cb666504084592bc70194b6e9e1ba5b5e923e3ac46b445493d53ae3c92a6"}
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.896811 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9","Type":"ContainerStarted","Data":"7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42"}
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.896995 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9","Type":"ContainerStarted","Data":"d90e439e923bc6c1035d22d60f250d96f20c63e96c3c1f7d11f264964e8ff48e"}
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.899038 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"de5b1e93-a28e-405b-8ab4-a1bc50922b2e","Type":"ContainerStarted","Data":"cc54e557ed2aeaca390b97a2d6bdaabed4adf91a13949244ade3e8858893fd51"}
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.899169 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"de5b1e93-a28e-405b-8ab4-a1bc50922b2e","Type":"ContainerStarted","Data":"588a60a6ce7e95ca69e96d43d955ad6aded8068e31385ea5da3d86dab1a6ac61"}
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.901763 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"206a6ff7-c300-42b8-9816-a272aacc0d94","Type":"ContainerStarted","Data":"376ebbcf274dcd2863924eeead3eeb6cbda33496d4fc8047f088e875c7b85064"}
Nov 26 05:45:42 crc kubenswrapper[4871]: I1126 05:45:42.922247 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=2.922225444 podStartE2EDuration="2.922225444s" podCreationTimestamp="2025-11-26 05:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:45:42.915688604 +0000 UTC m=+1201.098740190" watchObservedRunningTime="2025-11-26 05:45:42.922225444 +0000 UTC m=+1201.105277030"
Nov 26 05:45:43 crc kubenswrapper[4871]: I1126 05:45:43.928149 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9","Type":"ContainerStarted","Data":"0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721"}
Nov 26 05:45:43 crc kubenswrapper[4871]: I1126 05:45:43.928449 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9","Type":"ContainerStarted","Data":"4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9"}
Nov 26 05:45:43 crc kubenswrapper[4871]: I1126 05:45:43.931555 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"206a6ff7-c300-42b8-9816-a272aacc0d94","Type":"ContainerStarted","Data":"83252601737dd715e47d48d3251051955c3558a08757cf7edacbe2d7a198ed0c"}
Nov 26 05:45:43 crc kubenswrapper[4871]: I1126 05:45:43.931582 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"206a6ff7-c300-42b8-9816-a272aacc0d94","Type":"ContainerStarted","Data":"9759c27544505cd5d22c1b23fafcead002e0e90fcb5e2dd7c7ff244e962bf60c"}
Nov 26 05:45:43 crc kubenswrapper[4871]: I1126 05:45:43.933447 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a199844c-c13e-47ce-8980-b3292e3435b3","Type":"ContainerStarted","Data":"930fb7e2fa7be34a3e951b6789bc51b978dd7f78a61dbfa69a84e871cc4439cd"}
Nov 26 05:45:43 crc kubenswrapper[4871]: I1126 05:45:43.965558 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=2.965541795 podStartE2EDuration="2.965541795s" podCreationTimestamp="2025-11-26 05:45:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:45:43.953710115 +0000 UTC m=+1202.136761701" watchObservedRunningTime="2025-11-26 05:45:43.965541795 +0000 UTC m=+1202.148593381"
Nov 26 05:45:43 crc kubenswrapper[4871]: I1126 05:45:43.982832 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.982813299 podStartE2EDuration="3.982813299s" podCreationTimestamp="2025-11-26 05:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:45:43.976931845 +0000 UTC m=+1202.159983441" watchObservedRunningTime="2025-11-26 05:45:43.982813299 +0000 UTC m=+1202.165864885"
Nov 26 05:45:45 crc kubenswrapper[4871]: I1126 05:45:45.962427 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9","Type":"ContainerStarted","Data":"9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626"}
Nov 26 05:45:45 crc kubenswrapper[4871]: I1126 05:45:45.963669 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 26 05:45:45 crc kubenswrapper[4871]: I1126 05:45:45.990433 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.161744957 podStartE2EDuration="5.990415695s" podCreationTimestamp="2025-11-26 05:45:40 +0000 UTC" firstStartedPulling="2025-11-26 05:45:42.007896215 +0000 UTC m=+1200.190947801" lastFinishedPulling="2025-11-26 05:45:44.836566953 +0000 UTC m=+1203.019618539" observedRunningTime="2025-11-26 05:45:45.984609853 +0000 UTC m=+1204.167661439" watchObservedRunningTime="2025-11-26 05:45:45.990415695 +0000 UTC m=+1204.173467281"
Nov 26 05:45:49 crc kubenswrapper[4871]: I1126 05:45:49.187572 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:49 crc kubenswrapper[4871]: I1126 05:45:49.188107 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="ceilometer-central-agent" containerID="cri-o://7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42" gracePeriod=30
Nov 26 05:45:49 crc kubenswrapper[4871]: I1126 05:45:49.188177 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="proxy-httpd" containerID="cri-o://9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626" gracePeriod=30
Nov 26 05:45:49 crc kubenswrapper[4871]: I1126 05:45:49.188213 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="sg-core" containerID="cri-o://0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721" gracePeriod=30
Nov 26 05:45:49 crc kubenswrapper[4871]: I1126 05:45:49.188266 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="ceilometer-notification-agent" containerID="cri-o://4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9" gracePeriod=30
Nov 26 05:45:50 crc kubenswrapper[4871]: I1126 05:45:50.009360 4871 generic.go:334] "Generic (PLEG): container finished" podID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerID="9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626" exitCode=0
Nov 26 05:45:50 crc kubenswrapper[4871]: I1126 05:45:50.009391 4871 generic.go:334] "Generic (PLEG): container finished" podID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerID="0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721" exitCode=2
Nov 26 05:45:50 crc kubenswrapper[4871]: I1126 05:45:50.009403 4871 generic.go:334] "Generic (PLEG): container finished" podID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerID="4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9" exitCode=0
Nov 26 05:45:50 crc kubenswrapper[4871]: I1126 05:45:50.009420 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9","Type":"ContainerDied","Data":"9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626"}
Nov 26 05:45:50 crc kubenswrapper[4871]: I1126 05:45:50.009452 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9","Type":"ContainerDied","Data":"0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721"}
Nov 26 05:45:50 crc kubenswrapper[4871]: I1126 05:45:50.009464 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9","Type":"ContainerDied","Data":"4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9"}
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.380133 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.380423 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.391817 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.426440 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.439000 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.451297 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.580861 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.737438 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.737728 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.742690 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-combined-ca-bundle\") pod \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") "
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.742743 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-sg-core-conf-yaml\") pod \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") "
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.742772 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-log-httpd\") pod \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") "
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.742836 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-config-data\") pod \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") "
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.743138 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-run-httpd\") pod \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") "
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.743310 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-scripts\") pod \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") "
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.743346 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-472ck\" (UniqueName: \"kubernetes.io/projected/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-kube-api-access-472ck\") pod \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\" (UID: \"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9\") "
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.743568 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" (UID: "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.743942 4871 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.743988 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" (UID: "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.752865 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-scripts" (OuterVolumeSpecName: "scripts") pod "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" (UID: "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.753052 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-kube-api-access-472ck" (OuterVolumeSpecName: "kube-api-access-472ck") pod "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" (UID: "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9"). InnerVolumeSpecName "kube-api-access-472ck". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.774771 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" (UID: "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.780908 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.787383 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.821149 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" (UID: "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.846868 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.846931 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-472ck\" (UniqueName: \"kubernetes.io/projected/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-kube-api-access-472ck\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.846948 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.846962 4871 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.846974 4871 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.859925 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-config-data" (OuterVolumeSpecName: "config-data") pod "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" (UID: "a39dcb96-48e9-44da-a1c1-9918ff4ff2a9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:51 crc kubenswrapper[4871]: I1126 05:45:51.948272 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.036822 4871 generic.go:334] "Generic (PLEG): container finished" podID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerID="7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42" exitCode=0
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.036907 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.036959 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9","Type":"ContainerDied","Data":"7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42"}
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.037024 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a39dcb96-48e9-44da-a1c1-9918ff4ff2a9","Type":"ContainerDied","Data":"d90e439e923bc6c1035d22d60f250d96f20c63e96c3c1f7d11f264964e8ff48e"}
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.037057 4871 scope.go:117] "RemoveContainer" containerID="9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.037068 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.038261 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.038306 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.038327 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.038349 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.081948 4871 scope.go:117] "RemoveContainer" containerID="0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.090820 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.092298 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.112922 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.126708 4871 scope.go:117] "RemoveContainer" containerID="4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.130256 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:52 crc kubenswrapper[4871]: E1126 05:45:52.130773 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="ceilometer-central-agent"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.130789 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="ceilometer-central-agent"
Nov 26 05:45:52 crc kubenswrapper[4871]: E1126 05:45:52.130809 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="sg-core"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.130815 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="sg-core"
Nov 26 05:45:52 crc kubenswrapper[4871]: E1126 05:45:52.130827 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="ceilometer-notification-agent"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.130833 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="ceilometer-notification-agent"
Nov 26 05:45:52 crc kubenswrapper[4871]: E1126 05:45:52.130851 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="proxy-httpd"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.130857 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="proxy-httpd"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.131048 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="sg-core"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.131058 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="proxy-httpd"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.131074 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="ceilometer-notification-agent"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.131086 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" containerName="ceilometer-central-agent"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.132871 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.137051 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.137672 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.147374 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.194654 4871 scope.go:117] "RemoveContainer" containerID="7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.231956 4871 scope.go:117] "RemoveContainer" containerID="9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626"
Nov 26 05:45:52 crc kubenswrapper[4871]: E1126 05:45:52.232782 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626\": container with ID starting with 9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626 not found: ID does not exist" containerID="9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.232816 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626"} err="failed to get container status \"9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626\": rpc error: code = NotFound desc = could not find container \"9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626\": container with ID starting with 9cf6b667b683ed75dba761fa389389c33850b2e77f9edcd1d009e4b3687ce626 not found: ID does not exist"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.232837 4871 scope.go:117] "RemoveContainer" containerID="0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721"
Nov 26 05:45:52 crc kubenswrapper[4871]: E1126 05:45:52.233294 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721\": container with ID starting with 0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721 not found: ID does not exist" containerID="0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.233319 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721"} err="failed to get container status \"0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721\": rpc error: code = NotFound desc = could not find container \"0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721\": container with ID starting with 0b4404927d7d506e0c1aed4e761e167dbc336a95a2948177cb46ba4fd6144721 not found: ID does not exist"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.233333 4871 scope.go:117] "RemoveContainer" containerID="4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9"
Nov 26 05:45:52 crc kubenswrapper[4871]: E1126 05:45:52.236596 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9\": container with ID starting with 4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9 not found: ID does not exist" containerID="4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.236621 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9"} err="failed to get container status \"4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9\": rpc error: code = NotFound desc = could not find container \"4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9\": container with ID starting with 4c10a47e5b6cc0a7e87487ef4862487f9d0490fccfaadf005ce3405f868854e9 not found: ID does not exist"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.236637 4871 scope.go:117] "RemoveContainer" containerID="7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42"
Nov 26 05:45:52 crc kubenswrapper[4871]: E1126 05:45:52.240933 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42\": container with ID starting with 7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42 not found: ID does not exist" containerID="7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.241026 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42"} err="failed to get container status \"7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42\": rpc error: code = NotFound desc = could not find container \"7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42\": container with ID starting with 7ef0776a90f2cfdff16f3590c16558106094c8e1739a4b5a691c5c6003716c42 not found: ID does not exist"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.253984 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-run-httpd\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.254060 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-log-httpd\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.254083 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.254144 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-config-data\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.254181 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-scripts\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.254229 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vsgd\" (UniqueName: \"kubernetes.io/projected/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-kube-api-access-2vsgd\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.254263 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.355742 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-run-httpd\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.355810 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-log-httpd\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.355835 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.355875 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-config-data\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.355915 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-scripts\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.355970 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vsgd\" (UniqueName: \"kubernetes.io/projected/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-kube-api-access-2vsgd\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.356005 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.356432 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-run-httpd\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.358266 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-log-httpd\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.363088 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-config-data\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.363307 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-scripts\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.363965 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.374393 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.377420 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vsgd\" (UniqueName: \"kubernetes.io/projected/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-kube-api-access-2vsgd\") pod \"ceilometer-0\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") " pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.487238 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.521430 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a39dcb96-48e9-44da-a1c1-9918ff4ff2a9" path="/var/lib/kubelet/pods/a39dcb96-48e9-44da-a1c1-9918ff4ff2a9/volumes"
Nov 26 05:45:52 crc kubenswrapper[4871]: I1126 05:45:52.953384 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:45:52 crc kubenswrapper[4871]: W1126 05:45:52.960067 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e02bb4c_0d06_4203_bbc9_35f4f3b3c1ef.slice/crio-c415895006d9a413f422b48c3c4804a0c1544204db47ccf43d7e686387a78096 WatchSource:0}: Error finding container c415895006d9a413f422b48c3c4804a0c1544204db47ccf43d7e686387a78096: Status 404 returned error can't find the container with id c415895006d9a413f422b48c3c4804a0c1544204db47ccf43d7e686387a78096
Nov 26 05:45:53 crc kubenswrapper[4871]: I1126 05:45:53.046822 4871 generic.go:334] "Generic (PLEG): container finished" podID="6393db54-1c1f-47bc-8669-e56ed280db54" containerID="0b3f41dc451b2d5092df4530c819384ade2cd6e8bcc1cc5cc6ed1ee49c17ce9e" exitCode=0
Nov 26 05:45:53 crc kubenswrapper[4871]: I1126 05:45:53.046905 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-tfgn7" event={"ID":"6393db54-1c1f-47bc-8669-e56ed280db54","Type":"ContainerDied","Data":"0b3f41dc451b2d5092df4530c819384ade2cd6e8bcc1cc5cc6ed1ee49c17ce9e"}
Nov 26 05:45:53 crc kubenswrapper[4871]: I1126 05:45:53.064860 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef","Type":"ContainerStarted","Data":"c415895006d9a413f422b48c3c4804a0c1544204db47ccf43d7e686387a78096"}
Nov 26 05:45:53 crc kubenswrapper[4871]: I1126 05:45:53.984950 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.059819 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.084649 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef","Type":"ContainerStarted","Data":"ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55"}
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.084713 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef","Type":"ContainerStarted","Data":"fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6"}
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.122988 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.123077 4871 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.125445 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.667377 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.731924 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-combined-ca-bundle\") pod \"6393db54-1c1f-47bc-8669-e56ed280db54\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") "
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.732059 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-scripts\") pod \"6393db54-1c1f-47bc-8669-e56ed280db54\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") "
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.732089 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-config-data\") pod \"6393db54-1c1f-47bc-8669-e56ed280db54\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") "
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.732116 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-df6l6\" (UniqueName: \"kubernetes.io/projected/6393db54-1c1f-47bc-8669-e56ed280db54-kube-api-access-df6l6\") pod \"6393db54-1c1f-47bc-8669-e56ed280db54\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") "
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.741734 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6393db54-1c1f-47bc-8669-e56ed280db54-kube-api-access-df6l6" (OuterVolumeSpecName: "kube-api-access-df6l6") pod "6393db54-1c1f-47bc-8669-e56ed280db54" (UID: "6393db54-1c1f-47bc-8669-e56ed280db54"). InnerVolumeSpecName "kube-api-access-df6l6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.742660 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-scripts" (OuterVolumeSpecName: "scripts") pod "6393db54-1c1f-47bc-8669-e56ed280db54" (UID: "6393db54-1c1f-47bc-8669-e56ed280db54"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:54 crc kubenswrapper[4871]: E1126 05:45:54.775782 4871 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-config-data podName:6393db54-1c1f-47bc-8669-e56ed280db54 nodeName:}" failed. No retries permitted until 2025-11-26 05:45:55.27575662 +0000 UTC m=+1213.458808206 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-config-data") pod "6393db54-1c1f-47bc-8669-e56ed280db54" (UID: "6393db54-1c1f-47bc-8669-e56ed280db54") : error deleting /var/lib/kubelet/pods/6393db54-1c1f-47bc-8669-e56ed280db54/volume-subpaths: remove /var/lib/kubelet/pods/6393db54-1c1f-47bc-8669-e56ed280db54/volume-subpaths: no such file or directory
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.779316 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6393db54-1c1f-47bc-8669-e56ed280db54" (UID: "6393db54-1c1f-47bc-8669-e56ed280db54"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.837096 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.837138 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:54 crc kubenswrapper[4871]: I1126 05:45:54.837149 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-df6l6\" (UniqueName: \"kubernetes.io/projected/6393db54-1c1f-47bc-8669-e56ed280db54-kube-api-access-df6l6\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.096696 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-tfgn7"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.096892 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-tfgn7" event={"ID":"6393db54-1c1f-47bc-8669-e56ed280db54","Type":"ContainerDied","Data":"ed297b4a4fc1daac5da5464ce57b3bf0a0c1203f0e53df727ca377e627e39bef"}
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.096916 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed297b4a4fc1daac5da5464ce57b3bf0a0c1203f0e53df727ca377e627e39bef"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.184757 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 26 05:45:55 crc kubenswrapper[4871]: E1126 05:45:55.185693 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6393db54-1c1f-47bc-8669-e56ed280db54" containerName="nova-cell0-conductor-db-sync"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.185720 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6393db54-1c1f-47bc-8669-e56ed280db54" containerName="nova-cell0-conductor-db-sync"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.186099 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6393db54-1c1f-47bc-8669-e56ed280db54" containerName="nova-cell0-conductor-db-sync"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.187004 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.210563 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.245166 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20a39a9e-9f10-45c6-be1c-9834e366658f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"20a39a9e-9f10-45c6-be1c-9834e366658f\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.245430 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20a39a9e-9f10-45c6-be1c-9834e366658f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"20a39a9e-9f10-45c6-be1c-9834e366658f\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.245661 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gktjs\" (UniqueName: \"kubernetes.io/projected/20a39a9e-9f10-45c6-be1c-9834e366658f-kube-api-access-gktjs\") pod \"nova-cell0-conductor-0\" (UID: \"20a39a9e-9f10-45c6-be1c-9834e366658f\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.346804 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-config-data\") pod \"6393db54-1c1f-47bc-8669-e56ed280db54\" (UID: \"6393db54-1c1f-47bc-8669-e56ed280db54\") "
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.347474 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20a39a9e-9f10-45c6-be1c-9834e366658f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"20a39a9e-9f10-45c6-be1c-9834e366658f\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.347520 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20a39a9e-9f10-45c6-be1c-9834e366658f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"20a39a9e-9f10-45c6-be1c-9834e366658f\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.347584 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gktjs\" (UniqueName: \"kubernetes.io/projected/20a39a9e-9f10-45c6-be1c-9834e366658f-kube-api-access-gktjs\") pod \"nova-cell0-conductor-0\" (UID: \"20a39a9e-9f10-45c6-be1c-9834e366658f\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.351786 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-config-data" (OuterVolumeSpecName: "config-data") pod "6393db54-1c1f-47bc-8669-e56ed280db54" (UID: "6393db54-1c1f-47bc-8669-e56ed280db54"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.362469 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20a39a9e-9f10-45c6-be1c-9834e366658f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"20a39a9e-9f10-45c6-be1c-9834e366658f\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.365376 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20a39a9e-9f10-45c6-be1c-9834e366658f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"20a39a9e-9f10-45c6-be1c-9834e366658f\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.365827 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gktjs\" (UniqueName: \"kubernetes.io/projected/20a39a9e-9f10-45c6-be1c-9834e366658f-kube-api-access-gktjs\") pod \"nova-cell0-conductor-0\" (UID: \"20a39a9e-9f10-45c6-be1c-9834e366658f\") " pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.449822 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6393db54-1c1f-47bc-8669-e56ed280db54-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 05:45:55 crc kubenswrapper[4871]: I1126 05:45:55.505170 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:56 crc kubenswrapper[4871]: I1126 05:45:56.473514 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 26 05:45:57 crc kubenswrapper[4871]: I1126 05:45:57.124510 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef","Type":"ContainerStarted","Data":"a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f"}
Nov 26 05:45:57 crc kubenswrapper[4871]: I1126 05:45:57.126558 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"20a39a9e-9f10-45c6-be1c-9834e366658f","Type":"ContainerStarted","Data":"3b50b41fd671d82539240eded4d50512d80a55f96c3d64333289d8d2e71aeae3"}
Nov 26 05:45:57 crc kubenswrapper[4871]: I1126 05:45:57.126611 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"20a39a9e-9f10-45c6-be1c-9834e366658f","Type":"ContainerStarted","Data":"e3c6a5a21f8a2a12b19c4bad816fee959da8fe81f83daa596105d1219bbcbf6a"}
Nov 26 05:45:57 crc kubenswrapper[4871]: I1126 05:45:57.126695 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Nov 26 05:45:57 crc kubenswrapper[4871]: I1126 05:45:57.143183 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.14316633 podStartE2EDuration="2.14316633s" podCreationTimestamp="2025-11-26 05:45:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:45:57.140688679 +0000 UTC m=+1215.323740275" watchObservedRunningTime="2025-11-26 05:45:57.14316633 +0000 UTC m=+1215.326217916"
Nov 26 05:45:58 crc kubenswrapper[4871]: I1126 05:45:58.138281 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef","Type":"ContainerStarted","Data":"bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a"}
Nov 26 05:45:58 crc kubenswrapper[4871]: I1126 05:45:58.170615 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.838440087 podStartE2EDuration="6.170593751s" podCreationTimestamp="2025-11-26 05:45:52 +0000 UTC" firstStartedPulling="2025-11-26 05:45:52.962332426 +0000 UTC m=+1211.145384012" lastFinishedPulling="2025-11-26 05:45:57.29448609 +0000 UTC m=+1215.477537676" observedRunningTime="2025-11-26 05:45:58.167209188 +0000 UTC m=+1216.350260774" watchObservedRunningTime="2025-11-26 05:45:58.170593751 +0000 UTC m=+1216.353645337"
Nov 26 05:45:59 crc kubenswrapper[4871]: I1126 05:45:59.156499 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 26 05:46:05 crc kubenswrapper[4871]: I1126 05:46:05.556692 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.189404 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-l92kl"]
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.191605 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.195390 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.196210 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.202500 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-l92kl"]
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.290648 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.290844 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-scripts\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.291001 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-config-data\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.291277 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6h4h\" (UniqueName: \"kubernetes.io/projected/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-kube-api-access-c6h4h\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.362717 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.370339 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.373448 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.386627 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.399848 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-config-data\") pod \"nova-scheduler-0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " pod="openstack/nova-scheduler-0"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.399932 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm6zj\" (UniqueName: \"kubernetes.io/projected/686e3965-014e-4639-be92-909cf5e0d6b0-kube-api-access-nm6zj\") pod \"nova-scheduler-0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " pod="openstack/nova-scheduler-0"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.399975 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.400030 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-scripts\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.400065 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " pod="openstack/nova-scheduler-0"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.400102 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-config-data\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.400201 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6h4h\" (UniqueName: \"kubernetes.io/projected/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-kube-api-access-c6h4h\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.412385 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-scripts\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.413111 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-config-data\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.436891 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.470940 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6h4h\" (UniqueName: \"kubernetes.io/projected/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-kube-api-access-c6h4h\") pod \"nova-cell0-cell-mapping-l92kl\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " pod="openstack/nova-cell0-cell-mapping-l92kl"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.481622 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.512057 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.533533 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.533871 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-config-data\") pod \"nova-scheduler-0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " pod="openstack/nova-scheduler-0"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.533979 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.534044 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm6zj\" (UniqueName: \"kubernetes.io/projected/686e3965-014e-4639-be92-909cf5e0d6b0-kube-api-access-nm6zj\") pod \"nova-scheduler-0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " pod="openstack/nova-scheduler-0"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.534078 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.534210 4871
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.534395 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n59t\" (UniqueName: \"kubernetes.io/projected/ab149d36-1511-4420-8d1e-c33cca902bf2-kube-api-access-2n59t\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.562994 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-l92kl" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.608670 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm6zj\" (UniqueName: \"kubernetes.io/projected/686e3965-014e-4639-be92-909cf5e0d6b0-kube-api-access-nm6zj\") pod \"nova-scheduler-0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.611674 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-config-data\") pod \"nova-scheduler-0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.644554 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n59t\" (UniqueName: \"kubernetes.io/projected/ab149d36-1511-4420-8d1e-c33cca902bf2-kube-api-access-2n59t\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.645818 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.645905 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.653308 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.671377 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 
05:46:06.672604 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.691978 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n59t\" (UniqueName: \"kubernetes.io/projected/ab149d36-1511-4420-8d1e-c33cca902bf2-kube-api-access-2n59t\") pod \"nova-cell1-novncproxy-0\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.702293 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.714636 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.716575 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.717713 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.723887 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.733241 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.756581 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.758135 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.758548 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.760197 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrflt\" (UniqueName: \"kubernetes.io/projected/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-kube-api-access-nrflt\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.760234 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-config-data\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.760372 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-logs\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.760746 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.776223 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.807562 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-844fc57f6f-g4x5h"] Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.809871 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.815201 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-844fc57f6f-g4x5h"] Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.856742 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862701 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-sb\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862748 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-svc\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862779 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nz2v\" (UniqueName: \"kubernetes.io/projected/aac7a468-053c-4c4e-a3fb-99fc89d3e939-kube-api-access-9nz2v\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862803 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-logs\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862833 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862879 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-config\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862901 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-nb\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862916 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-config-data\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862932 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-swift-storage-0\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " 
pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862955 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862978 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrflt\" (UniqueName: \"kubernetes.io/projected/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-kube-api-access-nrflt\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.862995 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-config-data\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.863009 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4r65b\" (UniqueName: \"kubernetes.io/projected/a85c4d89-38e1-4218-aed5-28ba79374b78-kube-api-access-4r65b\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.863034 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a85c4d89-38e1-4218-aed5-28ba79374b78-logs\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.863406 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-logs\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.874391 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-config-data\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.877995 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.905123 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrflt\" (UniqueName: \"kubernetes.io/projected/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-kube-api-access-nrflt\") pod \"nova-api-0\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " pod="openstack/nova-api-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.967770 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-svc\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" 
(UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.968144 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nz2v\" (UniqueName: \"kubernetes.io/projected/aac7a468-053c-4c4e-a3fb-99fc89d3e939-kube-api-access-9nz2v\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.968231 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-config\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.968256 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-config-data\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.968273 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-nb\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.968293 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-swift-storage-0\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.968320 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.968349 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4r65b\" (UniqueName: \"kubernetes.io/projected/a85c4d89-38e1-4218-aed5-28ba79374b78-kube-api-access-4r65b\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.968378 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a85c4d89-38e1-4218-aed5-28ba79374b78-logs\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.968420 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-sb\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.969013 
4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-svc\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.969207 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-sb\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.973061 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-config\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.973739 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a85c4d89-38e1-4218-aed5-28ba79374b78-logs\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.976221 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-swift-storage-0\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.978027 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.978550 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-config-data\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.980307 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-nb\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.989249 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4r65b\" (UniqueName: \"kubernetes.io/projected/a85c4d89-38e1-4218-aed5-28ba79374b78-kube-api-access-4r65b\") pod \"nova-metadata-0\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " pod="openstack/nova-metadata-0" Nov 26 05:46:06 crc kubenswrapper[4871]: I1126 05:46:06.989418 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nz2v\" (UniqueName: \"kubernetes.io/projected/aac7a468-053c-4c4e-a3fb-99fc89d3e939-kube-api-access-9nz2v\") pod \"dnsmasq-dns-844fc57f6f-g4x5h\" (UID: 
\"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.063921 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.125328 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.162489 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.233649 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-l92kl"] Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.347984 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-22j7b"] Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.349610 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.351672 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.352515 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.381402 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-22j7b"] Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.412170 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.474722 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8bc8\" (UniqueName: \"kubernetes.io/projected/8f388a8a-48e4-4503-a842-23c380a1c649-kube-api-access-s8bc8\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.474763 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-scripts\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.475475 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.475560 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-config-data\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.511630 4871 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 05:46:07 crc kubenswrapper[4871]: W1126 05:46:07.517884 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab149d36_1511_4420_8d1e_c33cca902bf2.slice/crio-55fba4985e1db3398a9550a0dd99379b3c70802b7e9d08eaafcc7d006d84e487 WatchSource:0}: Error finding container 55fba4985e1db3398a9550a0dd99379b3c70802b7e9d08eaafcc7d006d84e487: Status 404 returned error can't find the container with id 55fba4985e1db3398a9550a0dd99379b3c70802b7e9d08eaafcc7d006d84e487 Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.578532 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.579275 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-config-data\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.579409 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8bc8\" (UniqueName: \"kubernetes.io/projected/8f388a8a-48e4-4503-a842-23c380a1c649-kube-api-access-s8bc8\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.579449 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-scripts\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.584477 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-scripts\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.587374 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-config-data\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.587558 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.595996 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8bc8\" 
(UniqueName: \"kubernetes.io/projected/8f388a8a-48e4-4503-a842-23c380a1c649-kube-api-access-s8bc8\") pod \"nova-cell1-conductor-db-sync-22j7b\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.632345 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.685117 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.763136 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:07 crc kubenswrapper[4871]: W1126 05:46:07.774693 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda85c4d89_38e1_4218_aed5_28ba79374b78.slice/crio-97cdce1ae84ca92afc2781d0c466431c57a092828508c680b9805a87459b8187 WatchSource:0}: Error finding container 97cdce1ae84ca92afc2781d0c466431c57a092828508c680b9805a87459b8187: Status 404 returned error can't find the container with id 97cdce1ae84ca92afc2781d0c466431c57a092828508c680b9805a87459b8187 Nov 26 05:46:07 crc kubenswrapper[4871]: I1126 05:46:07.778583 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-844fc57f6f-g4x5h"] Nov 26 05:46:07 crc kubenswrapper[4871]: W1126 05:46:07.785864 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaac7a468_053c_4c4e_a3fb_99fc89d3e939.slice/crio-df56ef4de10d376214174df5322c184990c85fa187bf1355daed7fd0e910afcb WatchSource:0}: Error finding container df56ef4de10d376214174df5322c184990c85fa187bf1355daed7fd0e910afcb: Status 404 returned error can't find the container with id df56ef4de10d376214174df5322c184990c85fa187bf1355daed7fd0e910afcb Nov 26 05:46:08 crc kubenswrapper[4871]: I1126 05:46:08.260716 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88","Type":"ContainerStarted","Data":"3f1d1b9248b3e6ebc16effdce7727297aff37b26861ff9ddf016259e8d7312f5"} Nov 26 05:46:08 crc kubenswrapper[4871]: I1126 05:46:08.265029 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ab149d36-1511-4420-8d1e-c33cca902bf2","Type":"ContainerStarted","Data":"55fba4985e1db3398a9550a0dd99379b3c70802b7e9d08eaafcc7d006d84e487"} Nov 26 05:46:08 crc kubenswrapper[4871]: I1126 05:46:08.267630 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85c4d89-38e1-4218-aed5-28ba79374b78","Type":"ContainerStarted","Data":"97cdce1ae84ca92afc2781d0c466431c57a092828508c680b9805a87459b8187"} Nov 26 05:46:08 crc kubenswrapper[4871]: I1126 05:46:08.269361 4871 generic.go:334] "Generic (PLEG): container finished" podID="aac7a468-053c-4c4e-a3fb-99fc89d3e939" containerID="293687b83c927fbe9c34ec5a70081a1e16fcf9eda30a0ce18d9304e2e7d98427" exitCode=0 Nov 26 05:46:08 crc kubenswrapper[4871]: I1126 05:46:08.269521 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" event={"ID":"aac7a468-053c-4c4e-a3fb-99fc89d3e939","Type":"ContainerDied","Data":"293687b83c927fbe9c34ec5a70081a1e16fcf9eda30a0ce18d9304e2e7d98427"} Nov 26 05:46:08 crc kubenswrapper[4871]: I1126 05:46:08.269577 4871 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" event={"ID":"aac7a468-053c-4c4e-a3fb-99fc89d3e939","Type":"ContainerStarted","Data":"df56ef4de10d376214174df5322c184990c85fa187bf1355daed7fd0e910afcb"} Nov 26 05:46:08 crc kubenswrapper[4871]: I1126 05:46:08.271382 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-22j7b"] Nov 26 05:46:08 crc kubenswrapper[4871]: I1126 05:46:08.272003 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-l92kl" event={"ID":"fc3703e8-ee84-4c63-983b-a1f0ea6976f1","Type":"ContainerStarted","Data":"7449fc98f53ce5ac1e6441d6e3940f69dc80e9702a7a8c5beb1f1a588f708c39"} Nov 26 05:46:08 crc kubenswrapper[4871]: I1126 05:46:08.272027 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-l92kl" event={"ID":"fc3703e8-ee84-4c63-983b-a1f0ea6976f1","Type":"ContainerStarted","Data":"ade9232099e383a4dfd1291baf610ead0aa330e1c639fdabb8add6b453d32168"} Nov 26 05:46:08 crc kubenswrapper[4871]: I1126 05:46:08.277091 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"686e3965-014e-4639-be92-909cf5e0d6b0","Type":"ContainerStarted","Data":"24cf430787fee6e3238b6891663eecaf272fc57ed6cf8a59d69258d9ec56d4f9"} Nov 26 05:46:08 crc kubenswrapper[4871]: I1126 05:46:08.304164 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-l92kl" podStartSLOduration=2.304146775 podStartE2EDuration="2.304146775s" podCreationTimestamp="2025-11-26 05:46:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:08.300836684 +0000 UTC m=+1226.483888270" watchObservedRunningTime="2025-11-26 05:46:08.304146775 +0000 UTC m=+1226.487198361" Nov 26 05:46:09 crc kubenswrapper[4871]: I1126 05:46:09.293982 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-22j7b" event={"ID":"8f388a8a-48e4-4503-a842-23c380a1c649","Type":"ContainerStarted","Data":"53ab1f13208b324ec2c352f7b58f323415a3adda284c7b3e7124f03afa93fb3e"} Nov 26 05:46:09 crc kubenswrapper[4871]: I1126 05:46:09.294318 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-22j7b" event={"ID":"8f388a8a-48e4-4503-a842-23c380a1c649","Type":"ContainerStarted","Data":"a4b9b195f30f1fb27870545cc5f335218ea649339eaa161cf1986998f23ed4a0"} Nov 26 05:46:09 crc kubenswrapper[4871]: I1126 05:46:09.297396 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" event={"ID":"aac7a468-053c-4c4e-a3fb-99fc89d3e939","Type":"ContainerStarted","Data":"7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6"} Nov 26 05:46:09 crc kubenswrapper[4871]: I1126 05:46:09.297452 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:09 crc kubenswrapper[4871]: I1126 05:46:09.319433 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-22j7b" podStartSLOduration=2.319417259 podStartE2EDuration="2.319417259s" podCreationTimestamp="2025-11-26 05:46:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:09.313822742 +0000 UTC m=+1227.496874318" 
watchObservedRunningTime="2025-11-26 05:46:09.319417259 +0000 UTC m=+1227.502468845" Nov 26 05:46:09 crc kubenswrapper[4871]: I1126 05:46:09.342209 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" podStartSLOduration=3.342191468 podStartE2EDuration="3.342191468s" podCreationTimestamp="2025-11-26 05:46:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:09.332605403 +0000 UTC m=+1227.515656979" watchObservedRunningTime="2025-11-26 05:46:09.342191468 +0000 UTC m=+1227.525243054" Nov 26 05:46:10 crc kubenswrapper[4871]: I1126 05:46:10.433726 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:10 crc kubenswrapper[4871]: I1126 05:46:10.444214 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.324310 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"686e3965-014e-4639-be92-909cf5e0d6b0","Type":"ContainerStarted","Data":"22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072"} Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.326286 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88","Type":"ContainerStarted","Data":"981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562"} Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.326348 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88","Type":"ContainerStarted","Data":"6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320"} Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.328413 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85c4d89-38e1-4218-aed5-28ba79374b78","Type":"ContainerStarted","Data":"e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8"} Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.328465 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85c4d89-38e1-4218-aed5-28ba79374b78","Type":"ContainerStarted","Data":"badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc"} Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.328491 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a85c4d89-38e1-4218-aed5-28ba79374b78" containerName="nova-metadata-log" containerID="cri-o://badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc" gracePeriod=30 Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.328509 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a85c4d89-38e1-4218-aed5-28ba79374b78" containerName="nova-metadata-metadata" containerID="cri-o://e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8" gracePeriod=30 Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.334223 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ab149d36-1511-4420-8d1e-c33cca902bf2","Type":"ContainerStarted","Data":"5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28"} Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.334353 4871 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="ab149d36-1511-4420-8d1e-c33cca902bf2" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28" gracePeriod=30 Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.349150 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.375465304 podStartE2EDuration="5.349129868s" podCreationTimestamp="2025-11-26 05:46:06 +0000 UTC" firstStartedPulling="2025-11-26 05:46:07.42383422 +0000 UTC m=+1225.606885806" lastFinishedPulling="2025-11-26 05:46:10.397498784 +0000 UTC m=+1228.580550370" observedRunningTime="2025-11-26 05:46:11.346372661 +0000 UTC m=+1229.529424247" watchObservedRunningTime="2025-11-26 05:46:11.349129868 +0000 UTC m=+1229.532181464" Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.379484 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.625898095 podStartE2EDuration="5.379462592s" podCreationTimestamp="2025-11-26 05:46:06 +0000 UTC" firstStartedPulling="2025-11-26 05:46:07.645337092 +0000 UTC m=+1225.828388678" lastFinishedPulling="2025-11-26 05:46:10.398901589 +0000 UTC m=+1228.581953175" observedRunningTime="2025-11-26 05:46:11.365727985 +0000 UTC m=+1229.548779581" watchObservedRunningTime="2025-11-26 05:46:11.379462592 +0000 UTC m=+1229.562514178" Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.423571 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.555355565 podStartE2EDuration="5.423556793s" podCreationTimestamp="2025-11-26 05:46:06 +0000 UTC" firstStartedPulling="2025-11-26 05:46:07.529584613 +0000 UTC m=+1225.712636199" lastFinishedPulling="2025-11-26 05:46:10.397785841 +0000 UTC m=+1228.580837427" observedRunningTime="2025-11-26 05:46:11.420368525 +0000 UTC m=+1229.603420111" watchObservedRunningTime="2025-11-26 05:46:11.423556793 +0000 UTC m=+1229.606608379" Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.443172 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.817869623 podStartE2EDuration="5.443149904s" podCreationTimestamp="2025-11-26 05:46:06 +0000 UTC" firstStartedPulling="2025-11-26 05:46:07.776955359 +0000 UTC m=+1225.960006945" lastFinishedPulling="2025-11-26 05:46:10.40223562 +0000 UTC m=+1228.585287226" observedRunningTime="2025-11-26 05:46:11.437077205 +0000 UTC m=+1229.620128811" watchObservedRunningTime="2025-11-26 05:46:11.443149904 +0000 UTC m=+1229.626201500" Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.717896 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.858476 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.927059 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.981155 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-combined-ca-bundle\") pod \"a85c4d89-38e1-4218-aed5-28ba79374b78\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.981213 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a85c4d89-38e1-4218-aed5-28ba79374b78-logs\") pod \"a85c4d89-38e1-4218-aed5-28ba79374b78\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.981337 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-config-data\") pod \"a85c4d89-38e1-4218-aed5-28ba79374b78\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.981379 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4r65b\" (UniqueName: \"kubernetes.io/projected/a85c4d89-38e1-4218-aed5-28ba79374b78-kube-api-access-4r65b\") pod \"a85c4d89-38e1-4218-aed5-28ba79374b78\" (UID: \"a85c4d89-38e1-4218-aed5-28ba79374b78\") " Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.984264 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a85c4d89-38e1-4218-aed5-28ba79374b78-logs" (OuterVolumeSpecName: "logs") pod "a85c4d89-38e1-4218-aed5-28ba79374b78" (UID: "a85c4d89-38e1-4218-aed5-28ba79374b78"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.996727 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a85c4d89-38e1-4218-aed5-28ba79374b78-kube-api-access-4r65b" (OuterVolumeSpecName: "kube-api-access-4r65b") pod "a85c4d89-38e1-4218-aed5-28ba79374b78" (UID: "a85c4d89-38e1-4218-aed5-28ba79374b78"). InnerVolumeSpecName "kube-api-access-4r65b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.998398 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4r65b\" (UniqueName: \"kubernetes.io/projected/a85c4d89-38e1-4218-aed5-28ba79374b78-kube-api-access-4r65b\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:11 crc kubenswrapper[4871]: I1126 05:46:11.998423 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a85c4d89-38e1-4218-aed5-28ba79374b78-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.048625 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a85c4d89-38e1-4218-aed5-28ba79374b78" (UID: "a85c4d89-38e1-4218-aed5-28ba79374b78"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.062226 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-config-data" (OuterVolumeSpecName: "config-data") pod "a85c4d89-38e1-4218-aed5-28ba79374b78" (UID: "a85c4d89-38e1-4218-aed5-28ba79374b78"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.101413 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.101447 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a85c4d89-38e1-4218-aed5-28ba79374b78-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.348959 4871 generic.go:334] "Generic (PLEG): container finished" podID="a85c4d89-38e1-4218-aed5-28ba79374b78" containerID="e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8" exitCode=0 Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.349287 4871 generic.go:334] "Generic (PLEG): container finished" podID="a85c4d89-38e1-4218-aed5-28ba79374b78" containerID="badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc" exitCode=143 Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.349035 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85c4d89-38e1-4218-aed5-28ba79374b78","Type":"ContainerDied","Data":"e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8"} Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.349557 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85c4d89-38e1-4218-aed5-28ba79374b78","Type":"ContainerDied","Data":"badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc"} Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.349575 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85c4d89-38e1-4218-aed5-28ba79374b78","Type":"ContainerDied","Data":"97cdce1ae84ca92afc2781d0c466431c57a092828508c680b9805a87459b8187"} Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.349013 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.349593 4871 scope.go:117] "RemoveContainer" containerID="e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.385024 4871 scope.go:117] "RemoveContainer" containerID="badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.391769 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.401359 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.419641 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:12 crc kubenswrapper[4871]: E1126 05:46:12.419994 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a85c4d89-38e1-4218-aed5-28ba79374b78" containerName="nova-metadata-log" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.420008 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a85c4d89-38e1-4218-aed5-28ba79374b78" containerName="nova-metadata-log" Nov 26 05:46:12 crc kubenswrapper[4871]: E1126 05:46:12.420039 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a85c4d89-38e1-4218-aed5-28ba79374b78" containerName="nova-metadata-metadata" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.420045 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a85c4d89-38e1-4218-aed5-28ba79374b78" containerName="nova-metadata-metadata" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.420214 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a85c4d89-38e1-4218-aed5-28ba79374b78" containerName="nova-metadata-log" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.420235 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a85c4d89-38e1-4218-aed5-28ba79374b78" containerName="nova-metadata-metadata" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.424264 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.426433 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.429419 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.437392 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.439826 4871 scope.go:117] "RemoveContainer" containerID="e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8" Nov 26 05:46:12 crc kubenswrapper[4871]: E1126 05:46:12.440619 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8\": container with ID starting with e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8 not found: ID does not exist" containerID="e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.440651 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8"} err="failed to get container status \"e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8\": rpc error: code = NotFound desc = could not find container \"e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8\": container with ID starting with e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8 not found: ID does not exist" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.440674 4871 scope.go:117] "RemoveContainer" containerID="badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc" Nov 26 05:46:12 crc kubenswrapper[4871]: E1126 05:46:12.443555 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc\": container with ID starting with badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc not found: ID does not exist" containerID="badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.443579 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc"} err="failed to get container status \"badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc\": rpc error: code = NotFound desc = could not find container \"badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc\": container with ID starting with badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc not found: ID does not exist" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.443594 4871 scope.go:117] "RemoveContainer" containerID="e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.445425 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8"} err="failed to get container status \"e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8\": rpc error: 
code = NotFound desc = could not find container \"e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8\": container with ID starting with e0ec7d18eb6f6707d1b2950f2e8bb7ce94ef281a9ec9778c992db82c7946cab8 not found: ID does not exist" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.445468 4871 scope.go:117] "RemoveContainer" containerID="badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.446203 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc"} err="failed to get container status \"badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc\": rpc error: code = NotFound desc = could not find container \"badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc\": container with ID starting with badbbf5fbb4a4514b5d89276866f8994bbc94f66290504e486cfbf614720b8dc not found: ID does not exist" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.515946 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq2nh\" (UniqueName: \"kubernetes.io/projected/ecc51a03-be67-453f-914a-bb878a230a41-kube-api-access-wq2nh\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.515983 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecc51a03-be67-453f-914a-bb878a230a41-logs\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.516004 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.516021 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.516263 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-config-data\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.520466 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a85c4d89-38e1-4218-aed5-28ba79374b78" path="/var/lib/kubelet/pods/a85c4d89-38e1-4218-aed5-28ba79374b78/volumes" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.618982 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-config-data\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 
crc kubenswrapper[4871]: I1126 05:46:12.619427 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wq2nh\" (UniqueName: \"kubernetes.io/projected/ecc51a03-be67-453f-914a-bb878a230a41-kube-api-access-wq2nh\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.619469 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecc51a03-be67-453f-914a-bb878a230a41-logs\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.619497 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.619632 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.623136 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecc51a03-be67-453f-914a-bb878a230a41-logs\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.629601 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.633393 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-config-data\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.634096 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.642001 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq2nh\" (UniqueName: \"kubernetes.io/projected/ecc51a03-be67-453f-914a-bb878a230a41-kube-api-access-wq2nh\") pod \"nova-metadata-0\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " pod="openstack/nova-metadata-0" Nov 26 05:46:12 crc kubenswrapper[4871]: I1126 05:46:12.797365 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:46:13 crc kubenswrapper[4871]: I1126 05:46:13.325460 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:13 crc kubenswrapper[4871]: W1126 05:46:13.337658 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podecc51a03_be67_453f_914a_bb878a230a41.slice/crio-08d217eba22afac6ae1c161187199d3aac49ee7892615d93b9b12221f538f868 WatchSource:0}: Error finding container 08d217eba22afac6ae1c161187199d3aac49ee7892615d93b9b12221f538f868: Status 404 returned error can't find the container with id 08d217eba22afac6ae1c161187199d3aac49ee7892615d93b9b12221f538f868 Nov 26 05:46:13 crc kubenswrapper[4871]: I1126 05:46:13.363750 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ecc51a03-be67-453f-914a-bb878a230a41","Type":"ContainerStarted","Data":"08d217eba22afac6ae1c161187199d3aac49ee7892615d93b9b12221f538f868"} Nov 26 05:46:14 crc kubenswrapper[4871]: I1126 05:46:14.375294 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ecc51a03-be67-453f-914a-bb878a230a41","Type":"ContainerStarted","Data":"64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d"} Nov 26 05:46:14 crc kubenswrapper[4871]: I1126 05:46:14.375964 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ecc51a03-be67-453f-914a-bb878a230a41","Type":"ContainerStarted","Data":"18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839"} Nov 26 05:46:14 crc kubenswrapper[4871]: I1126 05:46:14.404702 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.404675852 podStartE2EDuration="2.404675852s" podCreationTimestamp="2025-11-26 05:46:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:14.393350671 +0000 UTC m=+1232.576402267" watchObservedRunningTime="2025-11-26 05:46:14.404675852 +0000 UTC m=+1232.587727458" Nov 26 05:46:16 crc kubenswrapper[4871]: I1126 05:46:16.402790 4871 generic.go:334] "Generic (PLEG): container finished" podID="fc3703e8-ee84-4c63-983b-a1f0ea6976f1" containerID="7449fc98f53ce5ac1e6441d6e3940f69dc80e9702a7a8c5beb1f1a588f708c39" exitCode=0 Nov 26 05:46:16 crc kubenswrapper[4871]: I1126 05:46:16.402880 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-l92kl" event={"ID":"fc3703e8-ee84-4c63-983b-a1f0ea6976f1","Type":"ContainerDied","Data":"7449fc98f53ce5ac1e6441d6e3940f69dc80e9702a7a8c5beb1f1a588f708c39"} Nov 26 05:46:16 crc kubenswrapper[4871]: I1126 05:46:16.717760 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 05:46:16 crc kubenswrapper[4871]: I1126 05:46:16.762602 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 05:46:17 crc kubenswrapper[4871]: I1126 05:46:17.064110 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 05:46:17 crc kubenswrapper[4871]: I1126 05:46:17.064166 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 05:46:17 crc kubenswrapper[4871]: I1126 05:46:17.163715 4871 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:17 crc kubenswrapper[4871]: I1126 05:46:17.248429 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75958fc765-m7jqm"] Nov 26 05:46:17 crc kubenswrapper[4871]: I1126 05:46:17.248719 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" podUID="d2bb32df-62a8-451e-a469-464d621a12d2" containerName="dnsmasq-dns" containerID="cri-o://b8e969a5d5e59b0221edc667450c8de831c8bdee920d320a70eaedfccc530bcc" gracePeriod=10 Nov 26 05:46:17 crc kubenswrapper[4871]: I1126 05:46:17.415568 4871 generic.go:334] "Generic (PLEG): container finished" podID="d2bb32df-62a8-451e-a469-464d621a12d2" containerID="b8e969a5d5e59b0221edc667450c8de831c8bdee920d320a70eaedfccc530bcc" exitCode=0 Nov 26 05:46:17 crc kubenswrapper[4871]: I1126 05:46:17.416618 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" event={"ID":"d2bb32df-62a8-451e-a469-464d621a12d2","Type":"ContainerDied","Data":"b8e969a5d5e59b0221edc667450c8de831c8bdee920d320a70eaedfccc530bcc"} Nov 26 05:46:17 crc kubenswrapper[4871]: I1126 05:46:17.476297 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 05:46:17 crc kubenswrapper[4871]: I1126 05:46:17.798047 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 05:46:17 crc kubenswrapper[4871]: I1126 05:46:17.798106 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.147197 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.209:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.147428 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.209:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.430236 4871 generic.go:334] "Generic (PLEG): container finished" podID="8f388a8a-48e4-4503-a842-23c380a1c649" containerID="53ab1f13208b324ec2c352f7b58f323415a3adda284c7b3e7124f03afa93fb3e" exitCode=0 Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.430300 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-22j7b" event={"ID":"8f388a8a-48e4-4503-a842-23c380a1c649","Type":"ContainerDied","Data":"53ab1f13208b324ec2c352f7b58f323415a3adda284c7b3e7124f03afa93fb3e"} Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.432645 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-l92kl" event={"ID":"fc3703e8-ee84-4c63-983b-a1f0ea6976f1","Type":"ContainerDied","Data":"ade9232099e383a4dfd1291baf610ead0aa330e1c639fdabb8add6b453d32168"} Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.432682 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ade9232099e383a4dfd1291baf610ead0aa330e1c639fdabb8add6b453d32168" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 
05:46:18.443787 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" event={"ID":"d2bb32df-62a8-451e-a469-464d621a12d2","Type":"ContainerDied","Data":"ca2ddd24dc51a5a55a3ce71cc9e024b1b182f6ce8d94c77ddc7d039e024e152e"} Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.443831 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca2ddd24dc51a5a55a3ce71cc9e024b1b182f6ce8d94c77ddc7d039e024e152e" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.477409 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-l92kl" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.483029 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.554578 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-swift-storage-0\") pod \"d2bb32df-62a8-451e-a469-464d621a12d2\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.554651 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6h4h\" (UniqueName: \"kubernetes.io/projected/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-kube-api-access-c6h4h\") pod \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.554686 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-scripts\") pod \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.554750 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-combined-ca-bundle\") pod \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.554783 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgs84\" (UniqueName: \"kubernetes.io/projected/d2bb32df-62a8-451e-a469-464d621a12d2-kube-api-access-pgs84\") pod \"d2bb32df-62a8-451e-a469-464d621a12d2\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.554805 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-config-data\") pod \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\" (UID: \"fc3703e8-ee84-4c63-983b-a1f0ea6976f1\") " Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.554884 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-sb\") pod \"d2bb32df-62a8-451e-a469-464d621a12d2\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.554934 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-svc\") pod \"d2bb32df-62a8-451e-a469-464d621a12d2\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.554970 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-nb\") pod \"d2bb32df-62a8-451e-a469-464d621a12d2\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.554994 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-config\") pod \"d2bb32df-62a8-451e-a469-464d621a12d2\" (UID: \"d2bb32df-62a8-451e-a469-464d621a12d2\") " Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.563840 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-scripts" (OuterVolumeSpecName: "scripts") pod "fc3703e8-ee84-4c63-983b-a1f0ea6976f1" (UID: "fc3703e8-ee84-4c63-983b-a1f0ea6976f1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.564510 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-kube-api-access-c6h4h" (OuterVolumeSpecName: "kube-api-access-c6h4h") pod "fc3703e8-ee84-4c63-983b-a1f0ea6976f1" (UID: "fc3703e8-ee84-4c63-983b-a1f0ea6976f1"). InnerVolumeSpecName "kube-api-access-c6h4h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.567923 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2bb32df-62a8-451e-a469-464d621a12d2-kube-api-access-pgs84" (OuterVolumeSpecName: "kube-api-access-pgs84") pod "d2bb32df-62a8-451e-a469-464d621a12d2" (UID: "d2bb32df-62a8-451e-a469-464d621a12d2"). InnerVolumeSpecName "kube-api-access-pgs84". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.625665 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fc3703e8-ee84-4c63-983b-a1f0ea6976f1" (UID: "fc3703e8-ee84-4c63-983b-a1f0ea6976f1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.626680 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-config-data" (OuterVolumeSpecName: "config-data") pod "fc3703e8-ee84-4c63-983b-a1f0ea6976f1" (UID: "fc3703e8-ee84-4c63-983b-a1f0ea6976f1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.657707 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6h4h\" (UniqueName: \"kubernetes.io/projected/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-kube-api-access-c6h4h\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.657739 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.657747 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.657757 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pgs84\" (UniqueName: \"kubernetes.io/projected/d2bb32df-62a8-451e-a469-464d621a12d2-kube-api-access-pgs84\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.657766 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc3703e8-ee84-4c63-983b-a1f0ea6976f1-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.679355 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d2bb32df-62a8-451e-a469-464d621a12d2" (UID: "d2bb32df-62a8-451e-a469-464d621a12d2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.679833 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d2bb32df-62a8-451e-a469-464d621a12d2" (UID: "d2bb32df-62a8-451e-a469-464d621a12d2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.693032 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d2bb32df-62a8-451e-a469-464d621a12d2" (UID: "d2bb32df-62a8-451e-a469-464d621a12d2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.703663 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d2bb32df-62a8-451e-a469-464d621a12d2" (UID: "d2bb32df-62a8-451e-a469-464d621a12d2"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.716910 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-config" (OuterVolumeSpecName: "config") pod "d2bb32df-62a8-451e-a469-464d621a12d2" (UID: "d2bb32df-62a8-451e-a469-464d621a12d2"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.759103 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.759135 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.759146 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.759154 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:18 crc kubenswrapper[4871]: I1126 05:46:18.759162 4871 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d2bb32df-62a8-451e-a469-464d621a12d2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.451635 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-l92kl" Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.451870 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75958fc765-m7jqm" Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.495358 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75958fc765-m7jqm"] Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.516572 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75958fc765-m7jqm"] Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.700601 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.700898 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerName="nova-api-log" containerID="cri-o://6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320" gracePeriod=30 Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.701429 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerName="nova-api-api" containerID="cri-o://981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562" gracePeriod=30 Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.714692 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.715154 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ecc51a03-be67-453f-914a-bb878a230a41" containerName="nova-metadata-log" containerID="cri-o://18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839" gracePeriod=30 Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.715382 4871 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/nova-metadata-0" podUID="ecc51a03-be67-453f-914a-bb878a230a41" containerName="nova-metadata-metadata" containerID="cri-o://64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d" gracePeriod=30 Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.731692 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:46:19 crc kubenswrapper[4871]: I1126 05:46:19.731891 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="686e3965-014e-4639-be92-909cf5e0d6b0" containerName="nova-scheduler-scheduler" containerID="cri-o://22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072" gracePeriod=30 Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.004991 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.084197 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-combined-ca-bundle\") pod \"8f388a8a-48e4-4503-a842-23c380a1c649\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.084349 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8bc8\" (UniqueName: \"kubernetes.io/projected/8f388a8a-48e4-4503-a842-23c380a1c649-kube-api-access-s8bc8\") pod \"8f388a8a-48e4-4503-a842-23c380a1c649\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.084441 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-scripts\") pod \"8f388a8a-48e4-4503-a842-23c380a1c649\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.084468 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-config-data\") pod \"8f388a8a-48e4-4503-a842-23c380a1c649\" (UID: \"8f388a8a-48e4-4503-a842-23c380a1c649\") " Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.088973 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f388a8a-48e4-4503-a842-23c380a1c649-kube-api-access-s8bc8" (OuterVolumeSpecName: "kube-api-access-s8bc8") pod "8f388a8a-48e4-4503-a842-23c380a1c649" (UID: "8f388a8a-48e4-4503-a842-23c380a1c649"). InnerVolumeSpecName "kube-api-access-s8bc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.093497 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-scripts" (OuterVolumeSpecName: "scripts") pod "8f388a8a-48e4-4503-a842-23c380a1c649" (UID: "8f388a8a-48e4-4503-a842-23c380a1c649"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.126737 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f388a8a-48e4-4503-a842-23c380a1c649" (UID: "8f388a8a-48e4-4503-a842-23c380a1c649"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.137615 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-config-data" (OuterVolumeSpecName: "config-data") pod "8f388a8a-48e4-4503-a842-23c380a1c649" (UID: "8f388a8a-48e4-4503-a842-23c380a1c649"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.186888 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8bc8\" (UniqueName: \"kubernetes.io/projected/8f388a8a-48e4-4503-a842-23c380a1c649-kube-api-access-s8bc8\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.187180 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.187243 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.187299 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f388a8a-48e4-4503-a842-23c380a1c649-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.238582 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.391436 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecc51a03-be67-453f-914a-bb878a230a41-logs\") pod \"ecc51a03-be67-453f-914a-bb878a230a41\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.391729 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-combined-ca-bundle\") pod \"ecc51a03-be67-453f-914a-bb878a230a41\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.391786 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-nova-metadata-tls-certs\") pod \"ecc51a03-be67-453f-914a-bb878a230a41\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.392028 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ecc51a03-be67-453f-914a-bb878a230a41-logs" (OuterVolumeSpecName: "logs") pod "ecc51a03-be67-453f-914a-bb878a230a41" (UID: "ecc51a03-be67-453f-914a-bb878a230a41"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.392066 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-config-data\") pod \"ecc51a03-be67-453f-914a-bb878a230a41\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.392221 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wq2nh\" (UniqueName: \"kubernetes.io/projected/ecc51a03-be67-453f-914a-bb878a230a41-kube-api-access-wq2nh\") pod \"ecc51a03-be67-453f-914a-bb878a230a41\" (UID: \"ecc51a03-be67-453f-914a-bb878a230a41\") " Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.392876 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ecc51a03-be67-453f-914a-bb878a230a41-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.396781 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecc51a03-be67-453f-914a-bb878a230a41-kube-api-access-wq2nh" (OuterVolumeSpecName: "kube-api-access-wq2nh") pod "ecc51a03-be67-453f-914a-bb878a230a41" (UID: "ecc51a03-be67-453f-914a-bb878a230a41"). InnerVolumeSpecName "kube-api-access-wq2nh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.419782 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-config-data" (OuterVolumeSpecName: "config-data") pod "ecc51a03-be67-453f-914a-bb878a230a41" (UID: "ecc51a03-be67-453f-914a-bb878a230a41"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.429413 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ecc51a03-be67-453f-914a-bb878a230a41" (UID: "ecc51a03-be67-453f-914a-bb878a230a41"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.450627 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "ecc51a03-be67-453f-914a-bb878a230a41" (UID: "ecc51a03-be67-453f-914a-bb878a230a41"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.486816 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-22j7b" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.487112 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-22j7b" event={"ID":"8f388a8a-48e4-4503-a842-23c380a1c649","Type":"ContainerDied","Data":"a4b9b195f30f1fb27870545cc5f335218ea649339eaa161cf1986998f23ed4a0"} Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.487169 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4b9b195f30f1fb27870545cc5f335218ea649339eaa161cf1986998f23ed4a0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.494372 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.494405 4871 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.494415 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ecc51a03-be67-453f-914a-bb878a230a41-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.494426 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wq2nh\" (UniqueName: \"kubernetes.io/projected/ecc51a03-be67-453f-914a-bb878a230a41-kube-api-access-wq2nh\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.498602 4871 generic.go:334] "Generic (PLEG): container finished" podID="ecc51a03-be67-453f-914a-bb878a230a41" containerID="64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d" exitCode=0 Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.498643 4871 generic.go:334] "Generic (PLEG): container finished" podID="ecc51a03-be67-453f-914a-bb878a230a41" containerID="18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839" exitCode=143 Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.498690 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.498704 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ecc51a03-be67-453f-914a-bb878a230a41","Type":"ContainerDied","Data":"64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d"} Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.498738 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ecc51a03-be67-453f-914a-bb878a230a41","Type":"ContainerDied","Data":"18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839"} Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.498753 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ecc51a03-be67-453f-914a-bb878a230a41","Type":"ContainerDied","Data":"08d217eba22afac6ae1c161187199d3aac49ee7892615d93b9b12221f538f868"} Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.498772 4871 scope.go:117] "RemoveContainer" containerID="64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.519918 4871 generic.go:334] "Generic (PLEG): container finished" podID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerID="6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320" exitCode=143 Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.568298 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2bb32df-62a8-451e-a469-464d621a12d2" path="/var/lib/kubelet/pods/d2bb32df-62a8-451e-a469-464d621a12d2/volumes" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.569674 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 05:46:20 crc kubenswrapper[4871]: E1126 05:46:20.570240 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f388a8a-48e4-4503-a842-23c380a1c649" containerName="nova-cell1-conductor-db-sync" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.570261 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f388a8a-48e4-4503-a842-23c380a1c649" containerName="nova-cell1-conductor-db-sync" Nov 26 05:46:20 crc kubenswrapper[4871]: E1126 05:46:20.570297 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc3703e8-ee84-4c63-983b-a1f0ea6976f1" containerName="nova-manage" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.570304 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc3703e8-ee84-4c63-983b-a1f0ea6976f1" containerName="nova-manage" Nov 26 05:46:20 crc kubenswrapper[4871]: E1126 05:46:20.570319 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecc51a03-be67-453f-914a-bb878a230a41" containerName="nova-metadata-metadata" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.570327 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecc51a03-be67-453f-914a-bb878a230a41" containerName="nova-metadata-metadata" Nov 26 05:46:20 crc kubenswrapper[4871]: E1126 05:46:20.577105 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2bb32df-62a8-451e-a469-464d621a12d2" containerName="init" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.577168 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2bb32df-62a8-451e-a469-464d621a12d2" containerName="init" Nov 26 05:46:20 crc kubenswrapper[4871]: E1126 05:46:20.577223 4871 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="ecc51a03-be67-453f-914a-bb878a230a41" containerName="nova-metadata-log" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.577230 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecc51a03-be67-453f-914a-bb878a230a41" containerName="nova-metadata-log" Nov 26 05:46:20 crc kubenswrapper[4871]: E1126 05:46:20.577264 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2bb32df-62a8-451e-a469-464d621a12d2" containerName="dnsmasq-dns" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.577272 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2bb32df-62a8-451e-a469-464d621a12d2" containerName="dnsmasq-dns" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.577721 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc3703e8-ee84-4c63-983b-a1f0ea6976f1" containerName="nova-manage" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.577732 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2bb32df-62a8-451e-a469-464d621a12d2" containerName="dnsmasq-dns" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.577751 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecc51a03-be67-453f-914a-bb878a230a41" containerName="nova-metadata-log" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.577764 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f388a8a-48e4-4503-a842-23c380a1c649" containerName="nova-cell1-conductor-db-sync" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.577776 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecc51a03-be67-453f-914a-bb878a230a41" containerName="nova-metadata-metadata" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.581002 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88","Type":"ContainerDied","Data":"6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320"} Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.581038 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.581055 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.581067 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.581134 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.584977 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.587224 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.589576 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.601310 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.601900 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.603984 4871 scope.go:117] "RemoveContainer" containerID="18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.627785 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.651892 4871 scope.go:117] "RemoveContainer" containerID="64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d" Nov 26 05:46:20 crc kubenswrapper[4871]: E1126 05:46:20.652426 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d\": container with ID starting with 64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d not found: ID does not exist" containerID="64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.652474 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d"} err="failed to get container status \"64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d\": rpc error: code = NotFound desc = could not find container \"64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d\": container with ID starting with 64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d not found: ID does not exist" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.652505 4871 scope.go:117] "RemoveContainer" containerID="18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839" Nov 26 05:46:20 crc kubenswrapper[4871]: E1126 05:46:20.652933 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839\": container with ID starting with 18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839 not found: ID does not exist" containerID="18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.652990 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839"} err="failed to get container status \"18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839\": rpc error: code = NotFound desc = could not find container \"18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839\": container with ID starting with 18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839 not found: ID does not exist" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.653023 4871 scope.go:117] "RemoveContainer" containerID="64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.653327 4871 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d"} err="failed to get container status \"64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d\": rpc error: code = NotFound desc = could not find container \"64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d\": container with ID starting with 64ed8b2fde699fd9ec1c3f01efb5dd84ded182199dc48aa176395df41515508d not found: ID does not exist" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.653360 4871 scope.go:117] "RemoveContainer" containerID="18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.653596 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839"} err="failed to get container status \"18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839\": rpc error: code = NotFound desc = could not find container \"18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839\": container with ID starting with 18b9a37d94da349b9a81ec783890f95a7a3adac6c34fc5725a5304e131234839 not found: ID does not exist" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.701123 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a703f1e3-b021-4fe0-9c3f-a5a90b96678e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a703f1e3-b021-4fe0-9c3f-a5a90b96678e\") " pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.701173 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-config-data\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.701215 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.701272 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9af59634-8618-4934-8a71-606bcde10c43-logs\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.701291 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4bk4\" (UniqueName: \"kubernetes.io/projected/9af59634-8618-4934-8a71-606bcde10c43-kube-api-access-k4bk4\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.701309 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7mvp\" (UniqueName: \"kubernetes.io/projected/a703f1e3-b021-4fe0-9c3f-a5a90b96678e-kube-api-access-d7mvp\") pod \"nova-cell1-conductor-0\" (UID: \"a703f1e3-b021-4fe0-9c3f-a5a90b96678e\") " 
pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.701331 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a703f1e3-b021-4fe0-9c3f-a5a90b96678e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a703f1e3-b021-4fe0-9c3f-a5a90b96678e\") " pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.701393 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.803285 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4bk4\" (UniqueName: \"kubernetes.io/projected/9af59634-8618-4934-8a71-606bcde10c43-kube-api-access-k4bk4\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.803327 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9af59634-8618-4934-8a71-606bcde10c43-logs\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.803350 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7mvp\" (UniqueName: \"kubernetes.io/projected/a703f1e3-b021-4fe0-9c3f-a5a90b96678e-kube-api-access-d7mvp\") pod \"nova-cell1-conductor-0\" (UID: \"a703f1e3-b021-4fe0-9c3f-a5a90b96678e\") " pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.803375 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a703f1e3-b021-4fe0-9c3f-a5a90b96678e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a703f1e3-b021-4fe0-9c3f-a5a90b96678e\") " pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.803405 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.803542 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a703f1e3-b021-4fe0-9c3f-a5a90b96678e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a703f1e3-b021-4fe0-9c3f-a5a90b96678e\") " pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.803583 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-config-data\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.803633 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.803768 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9af59634-8618-4934-8a71-606bcde10c43-logs\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.808552 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.809286 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a703f1e3-b021-4fe0-9c3f-a5a90b96678e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"a703f1e3-b021-4fe0-9c3f-a5a90b96678e\") " pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.809306 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a703f1e3-b021-4fe0-9c3f-a5a90b96678e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"a703f1e3-b021-4fe0-9c3f-a5a90b96678e\") " pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.809695 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.812865 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-config-data\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.826183 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4bk4\" (UniqueName: \"kubernetes.io/projected/9af59634-8618-4934-8a71-606bcde10c43-kube-api-access-k4bk4\") pod \"nova-metadata-0\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") " pod="openstack/nova-metadata-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.830599 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7mvp\" (UniqueName: \"kubernetes.io/projected/a703f1e3-b021-4fe0-9c3f-a5a90b96678e-kube-api-access-d7mvp\") pod \"nova-cell1-conductor-0\" (UID: \"a703f1e3-b021-4fe0-9c3f-a5a90b96678e\") " pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.913478 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:20 crc kubenswrapper[4871]: I1126 05:46:20.939720 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:46:21 crc kubenswrapper[4871]: W1126 05:46:21.515995 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda703f1e3_b021_4fe0_9c3f_a5a90b96678e.slice/crio-b29545f757b752da170a927e08c97d23cd4c76f8d431fad918d177cc4a751868 WatchSource:0}: Error finding container b29545f757b752da170a927e08c97d23cd4c76f8d431fad918d177cc4a751868: Status 404 returned error can't find the container with id b29545f757b752da170a927e08c97d23cd4c76f8d431fad918d177cc4a751868 Nov 26 05:46:21 crc kubenswrapper[4871]: I1126 05:46:21.521416 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 05:46:21 crc kubenswrapper[4871]: I1126 05:46:21.533473 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a703f1e3-b021-4fe0-9c3f-a5a90b96678e","Type":"ContainerStarted","Data":"b29545f757b752da170a927e08c97d23cd4c76f8d431fad918d177cc4a751868"} Nov 26 05:46:21 crc kubenswrapper[4871]: I1126 05:46:21.569954 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:46:21 crc kubenswrapper[4871]: W1126 05:46:21.574474 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9af59634_8618_4934_8a71_606bcde10c43.slice/crio-42d05905e92f2befc9b3ec3dc89e6a3fe8598cd46dfc468f374cbae02e7013fd WatchSource:0}: Error finding container 42d05905e92f2befc9b3ec3dc89e6a3fe8598cd46dfc468f374cbae02e7013fd: Status 404 returned error can't find the container with id 42d05905e92f2befc9b3ec3dc89e6a3fe8598cd46dfc468f374cbae02e7013fd Nov 26 05:46:21 crc kubenswrapper[4871]: E1126 05:46:21.730150 4871 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 05:46:21 crc kubenswrapper[4871]: E1126 05:46:21.736843 4871 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 05:46:21 crc kubenswrapper[4871]: E1126 05:46:21.738600 4871 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 05:46:21 crc kubenswrapper[4871]: E1126 05:46:21.738684 4871 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="686e3965-014e-4639-be92-909cf5e0d6b0" containerName="nova-scheduler-scheduler" Nov 26 05:46:22 crc kubenswrapper[4871]: I1126 05:46:22.497959 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 05:46:22 crc kubenswrapper[4871]: I1126 05:46:22.523093 4871 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecc51a03-be67-453f-914a-bb878a230a41" path="/var/lib/kubelet/pods/ecc51a03-be67-453f-914a-bb878a230a41/volumes" Nov 26 05:46:22 crc kubenswrapper[4871]: I1126 05:46:22.583089 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9af59634-8618-4934-8a71-606bcde10c43","Type":"ContainerStarted","Data":"5ac1d3eed6816da396a8d7235c2650aa398b83b8ab2e071bba92aa0736c529cd"} Nov 26 05:46:22 crc kubenswrapper[4871]: I1126 05:46:22.583128 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9af59634-8618-4934-8a71-606bcde10c43","Type":"ContainerStarted","Data":"f875ba7d40b8e668c71b071714d435ca9aef02c364572f25828337bd34d78636"} Nov 26 05:46:22 crc kubenswrapper[4871]: I1126 05:46:22.583142 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9af59634-8618-4934-8a71-606bcde10c43","Type":"ContainerStarted","Data":"42d05905e92f2befc9b3ec3dc89e6a3fe8598cd46dfc468f374cbae02e7013fd"} Nov 26 05:46:22 crc kubenswrapper[4871]: I1126 05:46:22.585822 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"a703f1e3-b021-4fe0-9c3f-a5a90b96678e","Type":"ContainerStarted","Data":"830798e7ed4ff0c05b7d5b1e17632a57e987e02f99b3dae7c6afe43a630b36d2"} Nov 26 05:46:22 crc kubenswrapper[4871]: I1126 05:46:22.586257 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 26 05:46:22 crc kubenswrapper[4871]: I1126 05:46:22.618523 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.618505234 podStartE2EDuration="2.618505234s" podCreationTimestamp="2025-11-26 05:46:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:22.61312534 +0000 UTC m=+1240.796176926" watchObservedRunningTime="2025-11-26 05:46:22.618505234 +0000 UTC m=+1240.801556820" Nov 26 05:46:22 crc kubenswrapper[4871]: I1126 05:46:22.638634 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.638617792 podStartE2EDuration="2.638617792s" podCreationTimestamp="2025-11-26 05:46:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:22.631035564 +0000 UTC m=+1240.814087150" watchObservedRunningTime="2025-11-26 05:46:22.638617792 +0000 UTC m=+1240.821669378" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.234096 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.288604 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.361869 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-combined-ca-bundle\") pod \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.362034 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrflt\" (UniqueName: \"kubernetes.io/projected/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-kube-api-access-nrflt\") pod \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.362080 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-config-data\") pod \"686e3965-014e-4639-be92-909cf5e0d6b0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.362151 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-combined-ca-bundle\") pod \"686e3965-014e-4639-be92-909cf5e0d6b0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.362178 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-logs\") pod \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.362202 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nm6zj\" (UniqueName: \"kubernetes.io/projected/686e3965-014e-4639-be92-909cf5e0d6b0-kube-api-access-nm6zj\") pod \"686e3965-014e-4639-be92-909cf5e0d6b0\" (UID: \"686e3965-014e-4639-be92-909cf5e0d6b0\") " Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.362243 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-config-data\") pod \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\" (UID: \"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88\") " Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.363861 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-logs" (OuterVolumeSpecName: "logs") pod "3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" (UID: "3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.367409 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-kube-api-access-nrflt" (OuterVolumeSpecName: "kube-api-access-nrflt") pod "3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" (UID: "3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88"). InnerVolumeSpecName "kube-api-access-nrflt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.388796 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" (UID: "3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.394128 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-config-data" (OuterVolumeSpecName: "config-data") pod "686e3965-014e-4639-be92-909cf5e0d6b0" (UID: "686e3965-014e-4639-be92-909cf5e0d6b0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.396159 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/686e3965-014e-4639-be92-909cf5e0d6b0-kube-api-access-nm6zj" (OuterVolumeSpecName: "kube-api-access-nm6zj") pod "686e3965-014e-4639-be92-909cf5e0d6b0" (UID: "686e3965-014e-4639-be92-909cf5e0d6b0"). InnerVolumeSpecName "kube-api-access-nm6zj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.399805 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "686e3965-014e-4639-be92-909cf5e0d6b0" (UID: "686e3965-014e-4639-be92-909cf5e0d6b0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.408250 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-config-data" (OuterVolumeSpecName: "config-data") pod "3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" (UID: "3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.464150 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrflt\" (UniqueName: \"kubernetes.io/projected/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-kube-api-access-nrflt\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.464175 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.464186 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/686e3965-014e-4639-be92-909cf5e0d6b0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.464195 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.464207 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nm6zj\" (UniqueName: \"kubernetes.io/projected/686e3965-014e-4639-be92-909cf5e0d6b0-kube-api-access-nm6zj\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.464215 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.464225 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.597487 4871 generic.go:334] "Generic (PLEG): container finished" podID="686e3965-014e-4639-be92-909cf5e0d6b0" containerID="22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072" exitCode=0 Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.597555 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"686e3965-014e-4639-be92-909cf5e0d6b0","Type":"ContainerDied","Data":"22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072"} Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.597583 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"686e3965-014e-4639-be92-909cf5e0d6b0","Type":"ContainerDied","Data":"24cf430787fee6e3238b6891663eecaf272fc57ed6cf8a59d69258d9ec56d4f9"} Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.597599 4871 scope.go:117] "RemoveContainer" containerID="22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.597706 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.604679 4871 generic.go:334] "Generic (PLEG): container finished" podID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerID="981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562" exitCode=0 Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.605975 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.611360 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88","Type":"ContainerDied","Data":"981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562"} Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.611410 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88","Type":"ContainerDied","Data":"3f1d1b9248b3e6ebc16effdce7727297aff37b26861ff9ddf016259e8d7312f5"} Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.629753 4871 scope.go:117] "RemoveContainer" containerID="22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072" Nov 26 05:46:23 crc kubenswrapper[4871]: E1126 05:46:23.630309 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072\": container with ID starting with 22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072 not found: ID does not exist" containerID="22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.630414 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072"} err="failed to get container status \"22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072\": rpc error: code = NotFound desc = could not find container \"22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072\": container with ID starting with 22e6cb128ffea469e61941b44734ce0419cd3e3dc34120398d2b050072781072 not found: ID does not exist" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.630491 4871 scope.go:117] "RemoveContainer" containerID="981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.642593 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.658823 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.670744 4871 scope.go:117] "RemoveContainer" containerID="6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.671622 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:46:23 crc kubenswrapper[4871]: E1126 05:46:23.672174 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="686e3965-014e-4639-be92-909cf5e0d6b0" containerName="nova-scheduler-scheduler" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.672193 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="686e3965-014e-4639-be92-909cf5e0d6b0" containerName="nova-scheduler-scheduler" Nov 26 05:46:23 crc kubenswrapper[4871]: E1126 05:46:23.672216 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerName="nova-api-log" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.672225 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerName="nova-api-log" Nov 26 05:46:23 crc kubenswrapper[4871]: E1126 
05:46:23.672246 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerName="nova-api-api" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.672254 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerName="nova-api-api" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.672556 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="686e3965-014e-4639-be92-909cf5e0d6b0" containerName="nova-scheduler-scheduler" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.672582 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerName="nova-api-log" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.672609 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" containerName="nova-api-api" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.673506 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.682843 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.689415 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.709580 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.729663 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.741258 4871 scope.go:117] "RemoveContainer" containerID="981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562" Nov 26 05:46:23 crc kubenswrapper[4871]: E1126 05:46:23.742126 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562\": container with ID starting with 981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562 not found: ID does not exist" containerID="981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.742166 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562"} err="failed to get container status \"981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562\": rpc error: code = NotFound desc = could not find container \"981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562\": container with ID starting with 981dee769ebd34f3bc41a9fda57d3ec676abbcf0ba4c1b94a50c29cf3cc02562 not found: ID does not exist" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.742193 4871 scope.go:117] "RemoveContainer" containerID="6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320" Nov 26 05:46:23 crc kubenswrapper[4871]: E1126 05:46:23.742945 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320\": container with ID starting with 6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320 not found: ID does not exist" 
containerID="6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.742976 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320"} err="failed to get container status \"6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320\": rpc error: code = NotFound desc = could not find container \"6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320\": container with ID starting with 6ce936f7e5bb9ed3a9b04f43b5473706a905454a61e9c2b33cb767530c3a4320 not found: ID does not exist" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.747100 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.749078 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.752515 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.769832 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-config-data\") pod \"nova-scheduler-0\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.770136 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqnft\" (UniqueName: \"kubernetes.io/projected/216f8390-d77e-4760-af6d-838c7f7eb057-kube-api-access-fqnft\") pod \"nova-scheduler-0\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.770299 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.775191 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.871678 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-config-data\") pod \"nova-api-0\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.871732 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-config-data\") pod \"nova-scheduler-0\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.871817 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/708f8f9f-0552-4e11-a0cc-af29841a4b3b-logs\") pod \"nova-api-0\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:23 crc 
kubenswrapper[4871]: I1126 05:46:23.871850 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.871888 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqnft\" (UniqueName: \"kubernetes.io/projected/216f8390-d77e-4760-af6d-838c7f7eb057-kube-api-access-fqnft\") pod \"nova-scheduler-0\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.871999 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.872150 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pc268\" (UniqueName: \"kubernetes.io/projected/708f8f9f-0552-4e11-a0cc-af29841a4b3b-kube-api-access-pc268\") pod \"nova-api-0\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.876164 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-config-data\") pod \"nova-scheduler-0\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.880089 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.908060 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqnft\" (UniqueName: \"kubernetes.io/projected/216f8390-d77e-4760-af6d-838c7f7eb057-kube-api-access-fqnft\") pod \"nova-scheduler-0\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " pod="openstack/nova-scheduler-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.975642 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-config-data\") pod \"nova-api-0\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.975987 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/708f8f9f-0552-4e11-a0cc-af29841a4b3b-logs\") pod \"nova-api-0\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.976014 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.976089 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc268\" (UniqueName: \"kubernetes.io/projected/708f8f9f-0552-4e11-a0cc-af29841a4b3b-kube-api-access-pc268\") pod \"nova-api-0\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.976785 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/708f8f9f-0552-4e11-a0cc-af29841a4b3b-logs\") pod \"nova-api-0\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.994209 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-config-data\") pod \"nova-api-0\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:23 crc kubenswrapper[4871]: I1126 05:46:23.994591 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:24 crc kubenswrapper[4871]: I1126 05:46:24.003094 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc268\" (UniqueName: \"kubernetes.io/projected/708f8f9f-0552-4e11-a0cc-af29841a4b3b-kube-api-access-pc268\") pod \"nova-api-0\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") " pod="openstack/nova-api-0" Nov 26 05:46:24 crc kubenswrapper[4871]: I1126 05:46:24.023925 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 05:46:24 crc kubenswrapper[4871]: I1126 05:46:24.077438 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:46:24 crc kubenswrapper[4871]: I1126 05:46:24.519044 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88" path="/var/lib/kubelet/pods/3b7a58d3-1b2d-4cc9-a786-ac16c69ebc88/volumes" Nov 26 05:46:24 crc kubenswrapper[4871]: I1126 05:46:24.520164 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="686e3965-014e-4639-be92-909cf5e0d6b0" path="/var/lib/kubelet/pods/686e3965-014e-4639-be92-909cf5e0d6b0/volumes" Nov 26 05:46:24 crc kubenswrapper[4871]: I1126 05:46:24.585851 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:46:24 crc kubenswrapper[4871]: I1126 05:46:24.620142 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"216f8390-d77e-4760-af6d-838c7f7eb057","Type":"ContainerStarted","Data":"ea942ca8b71c481ae43516385bdd8b103eacd9d644aadbfb6e980bb5c6327635"} Nov 26 05:46:24 crc kubenswrapper[4871]: I1126 05:46:24.702412 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:24 crc kubenswrapper[4871]: W1126 05:46:24.712971 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod708f8f9f_0552_4e11_a0cc_af29841a4b3b.slice/crio-c513fa7d6dd5f9aee629bf375357376069740541a3ed53de9724554c838d67ec WatchSource:0}: Error finding container c513fa7d6dd5f9aee629bf375357376069740541a3ed53de9724554c838d67ec: Status 404 returned error can't find the container with id c513fa7d6dd5f9aee629bf375357376069740541a3ed53de9724554c838d67ec Nov 26 05:46:25 crc kubenswrapper[4871]: I1126 05:46:25.637450 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"708f8f9f-0552-4e11-a0cc-af29841a4b3b","Type":"ContainerStarted","Data":"6a8eb866dd4b0af4eecabc2bb254bf431804e8795d37e843feb6955d6846d3a4"} Nov 26 05:46:25 crc kubenswrapper[4871]: I1126 05:46:25.637875 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"708f8f9f-0552-4e11-a0cc-af29841a4b3b","Type":"ContainerStarted","Data":"cdc03c9544115bd518f766f7e34a3a94318010569fbbd2f2d61f4ac341818776"} Nov 26 05:46:25 crc kubenswrapper[4871]: I1126 05:46:25.637897 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"708f8f9f-0552-4e11-a0cc-af29841a4b3b","Type":"ContainerStarted","Data":"c513fa7d6dd5f9aee629bf375357376069740541a3ed53de9724554c838d67ec"} Nov 26 05:46:25 crc kubenswrapper[4871]: I1126 05:46:25.640652 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"216f8390-d77e-4760-af6d-838c7f7eb057","Type":"ContainerStarted","Data":"4e1e7b36b71431ecaf074304bd6bd5eb7a3f2e89f2384539f419d16b59fa9d1d"} Nov 26 05:46:25 crc kubenswrapper[4871]: I1126 05:46:25.674090 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.674065954 podStartE2EDuration="2.674065954s" podCreationTimestamp="2025-11-26 05:46:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:25.66504687 +0000 UTC m=+1243.848098456" watchObservedRunningTime="2025-11-26 05:46:25.674065954 +0000 UTC m=+1243.857117540" Nov 26 05:46:25 crc kubenswrapper[4871]: I1126 05:46:25.693759 4871 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.693743331 podStartE2EDuration="2.693743331s" podCreationTimestamp="2025-11-26 05:46:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:25.688335087 +0000 UTC m=+1243.871386693" watchObservedRunningTime="2025-11-26 05:46:25.693743331 +0000 UTC m=+1243.876794917" Nov 26 05:46:25 crc kubenswrapper[4871]: I1126 05:46:25.940613 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 05:46:25 crc kubenswrapper[4871]: I1126 05:46:25.940678 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 05:46:27 crc kubenswrapper[4871]: I1126 05:46:27.547088 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 05:46:27 crc kubenswrapper[4871]: I1126 05:46:27.547557 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="44a0ad7f-f13b-492a-914f-359b86e8be85" containerName="kube-state-metrics" containerID="cri-o://cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd" gracePeriod=30 Nov 26 05:46:27 crc kubenswrapper[4871]: I1126 05:46:27.623997 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="44a0ad7f-f13b-492a-914f-359b86e8be85" containerName="kube-state-metrics" probeResult="failure" output="Get \"http://10.217.0.115:8081/readyz\": dial tcp 10.217.0.115:8081: connect: connection refused" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.128979 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.260150 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsm7b\" (UniqueName: \"kubernetes.io/projected/44a0ad7f-f13b-492a-914f-359b86e8be85-kube-api-access-xsm7b\") pod \"44a0ad7f-f13b-492a-914f-359b86e8be85\" (UID: \"44a0ad7f-f13b-492a-914f-359b86e8be85\") " Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.268118 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44a0ad7f-f13b-492a-914f-359b86e8be85-kube-api-access-xsm7b" (OuterVolumeSpecName: "kube-api-access-xsm7b") pod "44a0ad7f-f13b-492a-914f-359b86e8be85" (UID: "44a0ad7f-f13b-492a-914f-359b86e8be85"). InnerVolumeSpecName "kube-api-access-xsm7b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.362395 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsm7b\" (UniqueName: \"kubernetes.io/projected/44a0ad7f-f13b-492a-914f-359b86e8be85-kube-api-access-xsm7b\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.680209 4871 generic.go:334] "Generic (PLEG): container finished" podID="44a0ad7f-f13b-492a-914f-359b86e8be85" containerID="cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd" exitCode=2 Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.680260 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"44a0ad7f-f13b-492a-914f-359b86e8be85","Type":"ContainerDied","Data":"cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd"} Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.680317 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"44a0ad7f-f13b-492a-914f-359b86e8be85","Type":"ContainerDied","Data":"b4a5d08f43c58b12ff8e0099f470efda868583ddd145d293cc43a60e2cec4d22"} Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.680341 4871 scope.go:117] "RemoveContainer" containerID="cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.680271 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.707398 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.711727 4871 scope.go:117] "RemoveContainer" containerID="cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd" Nov 26 05:46:28 crc kubenswrapper[4871]: E1126 05:46:28.712374 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd\": container with ID starting with cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd not found: ID does not exist" containerID="cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.712438 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd"} err="failed to get container status \"cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd\": rpc error: code = NotFound desc = could not find container \"cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd\": container with ID starting with cf324bf05fc9eac95b2c634bdce995f796f2fbf27ee8b2b42aa4fad6ee23cebd not found: ID does not exist" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.721372 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.738356 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 05:46:28 crc kubenswrapper[4871]: E1126 05:46:28.738908 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44a0ad7f-f13b-492a-914f-359b86e8be85" containerName="kube-state-metrics" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.738924 4871 
state_mem.go:107] "Deleted CPUSet assignment" podUID="44a0ad7f-f13b-492a-914f-359b86e8be85" containerName="kube-state-metrics" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.739102 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="44a0ad7f-f13b-492a-914f-359b86e8be85" containerName="kube-state-metrics" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.739847 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.747836 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.749894 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.779213 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.873128 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/e20fd17b-5b64-4272-9876-347ea057aa04-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.873198 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/e20fd17b-5b64-4272-9876-347ea057aa04-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.873405 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e20fd17b-5b64-4272-9876-347ea057aa04-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.873450 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6b9rd\" (UniqueName: \"kubernetes.io/projected/e20fd17b-5b64-4272-9876-347ea057aa04-kube-api-access-6b9rd\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.975365 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e20fd17b-5b64-4272-9876-347ea057aa04-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.975432 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6b9rd\" (UniqueName: \"kubernetes.io/projected/e20fd17b-5b64-4272-9876-347ea057aa04-kube-api-access-6b9rd\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.975726 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/e20fd17b-5b64-4272-9876-347ea057aa04-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.975776 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/e20fd17b-5b64-4272-9876-347ea057aa04-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.979693 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/e20fd17b-5b64-4272-9876-347ea057aa04-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.981625 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/e20fd17b-5b64-4272-9876-347ea057aa04-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.982440 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e20fd17b-5b64-4272-9876-347ea057aa04-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:28 crc kubenswrapper[4871]: I1126 05:46:28.993608 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6b9rd\" (UniqueName: \"kubernetes.io/projected/e20fd17b-5b64-4272-9876-347ea057aa04-kube-api-access-6b9rd\") pod \"kube-state-metrics-0\" (UID: \"e20fd17b-5b64-4272-9876-347ea057aa04\") " pod="openstack/kube-state-metrics-0" Nov 26 05:46:29 crc kubenswrapper[4871]: I1126 05:46:29.025280 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 05:46:29 crc kubenswrapper[4871]: I1126 05:46:29.056618 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 26 05:46:29 crc kubenswrapper[4871]: I1126 05:46:29.476701 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:46:29 crc kubenswrapper[4871]: I1126 05:46:29.477307 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="ceilometer-central-agent" containerID="cri-o://fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6" gracePeriod=30
Nov 26 05:46:29 crc kubenswrapper[4871]: I1126 05:46:29.477400 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="proxy-httpd" containerID="cri-o://bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a" gracePeriod=30
Nov 26 05:46:29 crc kubenswrapper[4871]: I1126 05:46:29.477448 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="sg-core" containerID="cri-o://a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f" gracePeriod=30
Nov 26 05:46:29 crc kubenswrapper[4871]: I1126 05:46:29.477469 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="ceilometer-notification-agent" containerID="cri-o://ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55" gracePeriod=30
Nov 26 05:46:29 crc kubenswrapper[4871]: I1126 05:46:29.551477 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 26 05:46:29 crc kubenswrapper[4871]: I1126 05:46:29.700301 4871 generic.go:334] "Generic (PLEG): container finished" podID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerID="a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f" exitCode=2
Nov 26 05:46:29 crc kubenswrapper[4871]: I1126 05:46:29.700399 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef","Type":"ContainerDied","Data":"a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f"}
Nov 26 05:46:29 crc kubenswrapper[4871]: I1126 05:46:29.705660 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e20fd17b-5b64-4272-9876-347ea057aa04","Type":"ContainerStarted","Data":"3e0cac492626885b5565a7b2dd1f39093491536158e25cd97de925e39e987419"}
Nov 26 05:46:30 crc kubenswrapper[4871]: I1126 05:46:30.524611 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44a0ad7f-f13b-492a-914f-359b86e8be85" path="/var/lib/kubelet/pods/44a0ad7f-f13b-492a-914f-359b86e8be85/volumes"
Nov 26 05:46:30 crc kubenswrapper[4871]: I1126 05:46:30.717730 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e20fd17b-5b64-4272-9876-347ea057aa04","Type":"ContainerStarted","Data":"7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9"}
Nov 26 05:46:30 crc kubenswrapper[4871]: I1126 05:46:30.718681 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 26 05:46:30 crc kubenswrapper[4871]: I1126 05:46:30.721438 4871 generic.go:334] "Generic (PLEG): container finished" podID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerID="bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a" exitCode=0
Nov 26 05:46:30 crc kubenswrapper[4871]: I1126 05:46:30.721494 4871 generic.go:334] "Generic (PLEG): container finished" podID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerID="fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6" exitCode=0
Nov 26 05:46:30 crc kubenswrapper[4871]: I1126 05:46:30.721497 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef","Type":"ContainerDied","Data":"bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a"}
Nov 26 05:46:30 crc kubenswrapper[4871]: I1126 05:46:30.721587 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef","Type":"ContainerDied","Data":"fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6"}
Nov 26 05:46:30 crc kubenswrapper[4871]: I1126 05:46:30.745621 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.402206816 podStartE2EDuration="2.745597626s" podCreationTimestamp="2025-11-26 05:46:28 +0000 UTC" firstStartedPulling="2025-11-26 05:46:29.555833109 +0000 UTC m=+1247.738884695" lastFinishedPulling="2025-11-26 05:46:29.899223919 +0000 UTC m=+1248.082275505" observedRunningTime="2025-11-26 05:46:30.738184693 +0000 UTC m=+1248.921236289" watchObservedRunningTime="2025-11-26 05:46:30.745597626 +0000 UTC m=+1248.928649222"
Nov 26 05:46:30 crc kubenswrapper[4871]: I1126 05:46:30.940716 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 26 05:46:30 crc kubenswrapper[4871]: I1126 05:46:30.940760 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 26 05:46:30 crc kubenswrapper[4871]: I1126 05:46:30.942791 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 26 05:46:31 crc kubenswrapper[4871]: I1126 05:46:31.954924 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9af59634-8618-4934-8a71-606bcde10c43" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.215:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 26 05:46:31 crc kubenswrapper[4871]: I1126 05:46:31.954862 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="9af59634-8618-4934-8a71-606bcde10c43" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.215:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 26 05:46:34 crc kubenswrapper[4871]: I1126 05:46:34.024117 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 26 05:46:34 crc kubenswrapper[4871]: I1126 05:46:34.058818 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 26 05:46:34 crc kubenswrapper[4871]: I1126 05:46:34.078057 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 26 05:46:34 crc kubenswrapper[4871]: I1126 05:46:34.078113 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 26 05:46:34 crc kubenswrapper[4871]: I1126 05:46:34.813115 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.160694 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.217:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.160720 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.217:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.613501 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.709930 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-run-httpd\") pod \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") "
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.710051 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-combined-ca-bundle\") pod \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") "
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.710077 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-config-data\") pod \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") "
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.710104 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-scripts\") pod \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") "
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.710187 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-log-httpd\") pod \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") "
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.710203 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vsgd\" (UniqueName: \"kubernetes.io/projected/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-kube-api-access-2vsgd\") pod \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") "
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.710227 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-sg-core-conf-yaml\") pod \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\" (UID: \"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef\") "
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.710298 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" (UID: "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.710509 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" (UID: "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.710701 4871 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.710716 4871 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.715627 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-kube-api-access-2vsgd" (OuterVolumeSpecName: "kube-api-access-2vsgd") pod "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" (UID: "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef"). InnerVolumeSpecName "kube-api-access-2vsgd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.722718 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-scripts" (OuterVolumeSpecName: "scripts") pod "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" (UID: "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.749750 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" (UID: "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.785981 4871 generic.go:334] "Generic (PLEG): container finished" podID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerID="ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55" exitCode=0
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.787597 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.787591 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef","Type":"ContainerDied","Data":"ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55"}
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.787814 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef","Type":"ContainerDied","Data":"c415895006d9a413f422b48c3c4804a0c1544204db47ccf43d7e686387a78096"}
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.787846 4871 scope.go:117] "RemoveContainer" containerID="bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.812743 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-scripts\") on node \"crc\" DevicePath \"\""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.812764 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vsgd\" (UniqueName: \"kubernetes.io/projected/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-kube-api-access-2vsgd\") on node \"crc\" DevicePath \"\""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.812776 4871 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.822313 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" (UID: "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.846125 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-config-data" (OuterVolumeSpecName: "config-data") pod "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" (UID: "9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.861701 4871 scope.go:117] "RemoveContainer" containerID="a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.915378 4871 scope.go:117] "RemoveContainer" containerID="ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.918446 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.918473 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.972421 4871 scope.go:117] "RemoveContainer" containerID="fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.994551 4871 scope.go:117] "RemoveContainer" containerID="bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a"
Nov 26 05:46:35 crc kubenswrapper[4871]: E1126 05:46:35.994958 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a\": container with ID starting with bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a not found: ID does not exist" containerID="bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.994988 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a"} err="failed to get container status \"bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a\": rpc error: code = NotFound desc = could not find container \"bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a\": container with ID starting with bf3b773a94780de7572d95f628daf1dd756367f97c96bab21b016729aff72b1a not found: ID does not exist"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.995007 4871 scope.go:117] "RemoveContainer" containerID="a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f"
Nov 26 05:46:35 crc kubenswrapper[4871]: E1126 05:46:35.995222 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f\": container with ID starting with a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f not found: ID does not exist" containerID="a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.995244 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f"} err="failed to get container status \"a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f\": rpc error: code = NotFound desc = could not find container \"a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f\": container with ID starting with a9e03b8f86c54ba4d2ffba9948c42978d3511e0fdb43426835465b40e3e5d25f not found: ID does not exist"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.995257 4871 scope.go:117] "RemoveContainer" containerID="ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55"
Nov 26 05:46:35 crc kubenswrapper[4871]: E1126 05:46:35.995443 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55\": container with ID starting with ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55 not found: ID does not exist" containerID="ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.995462 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55"} err="failed to get container status \"ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55\": rpc error: code = NotFound desc = could not find container \"ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55\": container with ID starting with ba9e63f547cce51df9656859f4618a04e37c4b58b195771d86440a91ef178a55 not found: ID does not exist"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.995474 4871 scope.go:117] "RemoveContainer" containerID="fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6"
Nov 26 05:46:35 crc kubenswrapper[4871]: E1126 05:46:35.995668 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6\": container with ID starting with fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6 not found: ID does not exist" containerID="fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6"
Nov 26 05:46:35 crc kubenswrapper[4871]: I1126 05:46:35.995688 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6"} err="failed to get container status \"fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6\": rpc error: code = NotFound desc = could not find container \"fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6\": container with ID starting with fa64d744984459215467fc80c6671d31fb1156ea2044451962d87355d48658d6 not found: ID does not exist"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.125575 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.143805 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.177330 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:46:36 crc kubenswrapper[4871]: E1126 05:46:36.177852 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="proxy-httpd"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.177866 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="proxy-httpd"
Nov 26 05:46:36 crc kubenswrapper[4871]: E1126 05:46:36.177885 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="ceilometer-central-agent"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.177891 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="ceilometer-central-agent"
Nov 26 05:46:36 crc kubenswrapper[4871]: E1126 05:46:36.177905 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="sg-core"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.177911 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="sg-core"
Nov 26 05:46:36 crc kubenswrapper[4871]: E1126 05:46:36.177920 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="ceilometer-notification-agent"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.177926 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="ceilometer-notification-agent"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.178096 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="sg-core"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.178109 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="ceilometer-central-agent"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.178122 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="ceilometer-notification-agent"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.178143 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" containerName="proxy-httpd"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.179842 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.183757 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.184084 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.184239 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.202288 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.241232 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-log-httpd\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.241297 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.241340 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.241425 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zmpn\" (UniqueName: \"kubernetes.io/projected/f3b8e510-e73e-443d-af27-1b406415874a-kube-api-access-6zmpn\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.241506 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-run-httpd\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.241590 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-config-data\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.241772 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-scripts\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.241823 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.343825 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.343893 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.343927 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zmpn\" (UniqueName: \"kubernetes.io/projected/f3b8e510-e73e-443d-af27-1b406415874a-kube-api-access-6zmpn\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.343969 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-run-httpd\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.343987 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-config-data\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.344035 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-scripts\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.344052 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.344096 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-log-httpd\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.344977 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-log-httpd\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.345202 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-run-httpd\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.349478 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.351396 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-config-data\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.352049 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-scripts\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.353149 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.361236 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.365074 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zmpn\" (UniqueName: \"kubernetes.io/projected/f3b8e510-e73e-443d-af27-1b406415874a-kube-api-access-6zmpn\") pod \"ceilometer-0\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " pod="openstack/ceilometer-0"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.522773 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef" path="/var/lib/kubelet/pods/9e02bb4c-0d06-4203-bbc9-35f4f3b3c1ef/volumes"
Nov 26 05:46:36 crc kubenswrapper[4871]: I1126 05:46:36.551069 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 26 05:46:37 crc kubenswrapper[4871]: I1126 05:46:37.073514 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:46:37 crc kubenswrapper[4871]: W1126 05:46:37.089195 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3b8e510_e73e_443d_af27_1b406415874a.slice/crio-c95273ded0c338a999c2fd4c58ce778987f3bc56b10cb36ecb273d92506d37a8 WatchSource:0}: Error finding container c95273ded0c338a999c2fd4c58ce778987f3bc56b10cb36ecb273d92506d37a8: Status 404 returned error can't find the container with id c95273ded0c338a999c2fd4c58ce778987f3bc56b10cb36ecb273d92506d37a8
Nov 26 05:46:37 crc kubenswrapper[4871]: I1126 05:46:37.808076 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b8e510-e73e-443d-af27-1b406415874a","Type":"ContainerStarted","Data":"c0f767aef6999be3c85acd6a2f4d15eda4713d46381e221b265cc9ca241a1efa"}
Nov 26 05:46:37 crc kubenswrapper[4871]: I1126 05:46:37.808771 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b8e510-e73e-443d-af27-1b406415874a","Type":"ContainerStarted","Data":"c95273ded0c338a999c2fd4c58ce778987f3bc56b10cb36ecb273d92506d37a8"}
Nov 26 05:46:38 crc kubenswrapper[4871]: I1126 05:46:38.820974 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b8e510-e73e-443d-af27-1b406415874a","Type":"ContainerStarted","Data":"b1ed857145f01e3ca9774882273148508e62165fcc00122603f56c185228eb67"}
Nov 26 05:46:38 crc kubenswrapper[4871]: I1126 05:46:38.821355 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b8e510-e73e-443d-af27-1b406415874a","Type":"ContainerStarted","Data":"0cf18a43075ff4eec8cd5c514506b996f11104cc9376286a8d819fb5e9afc495"}
Nov 26 05:46:39 crc kubenswrapper[4871]: I1126 05:46:39.064629 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 26 05:46:40 crc kubenswrapper[4871]: I1126 05:46:40.842297 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b8e510-e73e-443d-af27-1b406415874a","Type":"ContainerStarted","Data":"18f6b63715e969d125aeeadec99e807978fb5fc9911e95a7862cd082f6762b6c"}
Nov 26 05:46:40 crc kubenswrapper[4871]: I1126 05:46:40.842922 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 26 05:46:40 crc kubenswrapper[4871]: I1126 05:46:40.875645 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.018378694 podStartE2EDuration="4.875625729s" podCreationTimestamp="2025-11-26 05:46:36 +0000 UTC" firstStartedPulling="2025-11-26 05:46:37.096931448 +0000 UTC m=+1255.279983034" lastFinishedPulling="2025-11-26 05:46:39.954178483 +0000 UTC m=+1258.137230069" observedRunningTime="2025-11-26 05:46:40.872208605 +0000 UTC m=+1259.055260191" watchObservedRunningTime="2025-11-26 05:46:40.875625729 +0000 UTC m=+1259.058677315"
Nov 26 05:46:40 crc kubenswrapper[4871]: I1126 05:46:40.946107 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 26 05:46:40 crc kubenswrapper[4871]: I1126 05:46:40.953636 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 26 05:46:40 crc kubenswrapper[4871]: I1126 05:46:40.956257 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.811444 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.851327 4871 generic.go:334] "Generic (PLEG): container finished" podID="ab149d36-1511-4420-8d1e-c33cca902bf2" containerID="5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28" exitCode=137
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.851364 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.851620 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ab149d36-1511-4420-8d1e-c33cca902bf2","Type":"ContainerDied","Data":"5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28"}
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.851646 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ab149d36-1511-4420-8d1e-c33cca902bf2","Type":"ContainerDied","Data":"55fba4985e1db3398a9550a0dd99379b3c70802b7e9d08eaafcc7d006d84e487"}
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.851661 4871 scope.go:117] "RemoveContainer" containerID="5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28"
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.857310 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.873587 4871 scope.go:117] "RemoveContainer" containerID="5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28"
Nov 26 05:46:41 crc kubenswrapper[4871]: E1126 05:46:41.874131 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28\": container with ID starting with 5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28 not found: ID does not exist" containerID="5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28"
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.874176 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28"} err="failed to get container status \"5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28\": rpc error: code = NotFound desc = could not find container \"5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28\": container with ID starting with 5fc76647b33fd8043e4fd68aa32974de96c16dde68c273a54006b703b8721b28 not found: ID does not exist"
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.975873 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2n59t\" (UniqueName: \"kubernetes.io/projected/ab149d36-1511-4420-8d1e-c33cca902bf2-kube-api-access-2n59t\") pod \"ab149d36-1511-4420-8d1e-c33cca902bf2\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") "
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.976020 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-combined-ca-bundle\") pod \"ab149d36-1511-4420-8d1e-c33cca902bf2\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") "
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.976152 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-config-data\") pod \"ab149d36-1511-4420-8d1e-c33cca902bf2\" (UID: \"ab149d36-1511-4420-8d1e-c33cca902bf2\") "
Nov 26 05:46:41 crc kubenswrapper[4871]: I1126 05:46:41.993910 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab149d36-1511-4420-8d1e-c33cca902bf2-kube-api-access-2n59t" (OuterVolumeSpecName: "kube-api-access-2n59t") pod "ab149d36-1511-4420-8d1e-c33cca902bf2" (UID: "ab149d36-1511-4420-8d1e-c33cca902bf2"). InnerVolumeSpecName "kube-api-access-2n59t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.008920 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-config-data" (OuterVolumeSpecName: "config-data") pod "ab149d36-1511-4420-8d1e-c33cca902bf2" (UID: "ab149d36-1511-4420-8d1e-c33cca902bf2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.022460 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ab149d36-1511-4420-8d1e-c33cca902bf2" (UID: "ab149d36-1511-4420-8d1e-c33cca902bf2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.078353 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.078569 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab149d36-1511-4420-8d1e-c33cca902bf2-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.078624 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2n59t\" (UniqueName: \"kubernetes.io/projected/ab149d36-1511-4420-8d1e-c33cca902bf2-kube-api-access-2n59t\") on node \"crc\" DevicePath \"\""
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.183477 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.192873 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.204759 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 05:46:42 crc kubenswrapper[4871]: E1126 05:46:42.205156 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab149d36-1511-4420-8d1e-c33cca902bf2" containerName="nova-cell1-novncproxy-novncproxy"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.205173 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab149d36-1511-4420-8d1e-c33cca902bf2" containerName="nova-cell1-novncproxy-novncproxy"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.205370 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab149d36-1511-4420-8d1e-c33cca902bf2" containerName="nova-cell1-novncproxy-novncproxy"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.207229 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.210308 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.210638 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.210970 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.230545 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.282998 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.283237 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.283365 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.283464 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwsmr\" (UniqueName: \"kubernetes.io/projected/94714c91-ac3e-4195-9c74-84e090b73a6e-kube-api-access-lwsmr\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.283585 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.386400 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.386460 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.386495 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.386550 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwsmr\" (UniqueName: \"kubernetes.io/projected/94714c91-ac3e-4195-9c74-84e090b73a6e-kube-api-access-lwsmr\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.386602 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.393693 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.393844 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.399070 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.400131 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/94714c91-ac3e-4195-9c74-84e090b73a6e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.427186 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwsmr\" (UniqueName: \"kubernetes.io/projected/94714c91-ac3e-4195-9c74-84e090b73a6e-kube-api-access-lwsmr\") pod \"nova-cell1-novncproxy-0\" (UID: \"94714c91-ac3e-4195-9c74-84e090b73a6e\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.524983 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:42 crc kubenswrapper[4871]: I1126 05:46:42.539201 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab149d36-1511-4420-8d1e-c33cca902bf2" path="/var/lib/kubelet/pods/ab149d36-1511-4420-8d1e-c33cca902bf2/volumes"
Nov 26 05:46:43 crc kubenswrapper[4871]: W1126 05:46:43.043639 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod94714c91_ac3e_4195_9c74_84e090b73a6e.slice/crio-b2b5b2ce12ab7cba2d8440aefee940314ed4630b4fc6e6266618cef4b78382d5 WatchSource:0}: Error finding container b2b5b2ce12ab7cba2d8440aefee940314ed4630b4fc6e6266618cef4b78382d5: Status 404 returned error can't find the container with id b2b5b2ce12ab7cba2d8440aefee940314ed4630b4fc6e6266618cef4b78382d5
Nov 26 05:46:43 crc kubenswrapper[4871]: I1126 05:46:43.049345 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 26 05:46:43 crc kubenswrapper[4871]: I1126 05:46:43.902832 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"94714c91-ac3e-4195-9c74-84e090b73a6e","Type":"ContainerStarted","Data":"4711fc1883d43669f80893fa0e76901f75ec5ea1a59a3e671a8e60c4777087f4"}
Nov 26 05:46:43 crc kubenswrapper[4871]: I1126 05:46:43.903221 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"94714c91-ac3e-4195-9c74-84e090b73a6e","Type":"ContainerStarted","Data":"b2b5b2ce12ab7cba2d8440aefee940314ed4630b4fc6e6266618cef4b78382d5"}
Nov 26 05:46:43 crc kubenswrapper[4871]: I1126 05:46:43.933692 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.93366923 podStartE2EDuration="1.93366923s" podCreationTimestamp="2025-11-26 05:46:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:43.927658711 +0000 UTC m=+1262.110710297" watchObservedRunningTime="2025-11-26 05:46:43.93366923 +0000 UTC m=+1262.116720826"
Nov 26 05:46:44 crc kubenswrapper[4871]: I1126 05:46:44.087002 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 26 05:46:44 crc kubenswrapper[4871]: I1126 05:46:44.087565 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 26 05:46:44 crc kubenswrapper[4871]: I1126 05:46:44.092883 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 26 05:46:44 crc kubenswrapper[4871]: I1126 05:46:44.100776 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 26 05:46:44 crc kubenswrapper[4871]: I1126 05:46:44.914852 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 26 05:46:44 crc kubenswrapper[4871]: I1126 05:46:44.926815 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.108435 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54599d8f7-7gq8f"]
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.110291 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.125618 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54599d8f7-7gq8f"]
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.259322 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqw68\" (UniqueName: \"kubernetes.io/projected/6d11cc8a-2c3e-421f-a156-0a811156876e-kube-api-access-gqw68\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.259407 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-swift-storage-0\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.259448 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-config\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.259480 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-nb\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.259553 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-svc\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.259598 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-sb\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.360981 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqw68\" (UniqueName: \"kubernetes.io/projected/6d11cc8a-2c3e-421f-a156-0a811156876e-kube-api-access-gqw68\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.361050 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-swift-storage-0\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.361081 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-config\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.361107 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-nb\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.361125 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-svc\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.361156 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-sb\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.363496 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-sb\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.363504 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-swift-storage-0\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.363622 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-config\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.363675 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-svc\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.363912 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-nb\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.398313 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqw68\" (UniqueName: \"kubernetes.io/projected/6d11cc8a-2c3e-421f-a156-0a811156876e-kube-api-access-gqw68\") pod \"dnsmasq-dns-54599d8f7-7gq8f\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.476457 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:45 crc kubenswrapper[4871]: I1126 05:46:45.956720 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54599d8f7-7gq8f"]
Nov 26 05:46:45 crc kubenswrapper[4871]: W1126 05:46:45.961731 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d11cc8a_2c3e_421f_a156_0a811156876e.slice/crio-2e5534759199e0250d3d6edd4d8b4c5ec22bd95fea617cc6dbb36d99dd6f7289 WatchSource:0}: Error finding container 2e5534759199e0250d3d6edd4d8b4c5ec22bd95fea617cc6dbb36d99dd6f7289: Status 404 returned error can't find the container with id 2e5534759199e0250d3d6edd4d8b4c5ec22bd95fea617cc6dbb36d99dd6f7289
Nov 26 05:46:46 crc kubenswrapper[4871]: I1126 05:46:46.935134 4871 generic.go:334] "Generic (PLEG): container finished" podID="6d11cc8a-2c3e-421f-a156-0a811156876e" containerID="c4835066071e17315a28af1900a77408b1670e16c55821678df6d4ceb007144b" exitCode=0
Nov 26 05:46:46 crc kubenswrapper[4871]: I1126 05:46:46.936815 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f" event={"ID":"6d11cc8a-2c3e-421f-a156-0a811156876e","Type":"ContainerDied","Data":"c4835066071e17315a28af1900a77408b1670e16c55821678df6d4ceb007144b"}
Nov 26 05:46:46 crc kubenswrapper[4871]: I1126 05:46:46.936849 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f" event={"ID":"6d11cc8a-2c3e-421f-a156-0a811156876e","Type":"ContainerStarted","Data":"2e5534759199e0250d3d6edd4d8b4c5ec22bd95fea617cc6dbb36d99dd6f7289"}
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.478643 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.479477 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="ceilometer-central-agent" containerID="cri-o://c0f767aef6999be3c85acd6a2f4d15eda4713d46381e221b265cc9ca241a1efa" gracePeriod=30
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.479540 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="proxy-httpd" containerID="cri-o://18f6b63715e969d125aeeadec99e807978fb5fc9911e95a7862cd082f6762b6c" gracePeriod=30
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.479556 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="sg-core" containerID="cri-o://b1ed857145f01e3ca9774882273148508e62165fcc00122603f56c185228eb67" gracePeriod=30
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.479686 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="ceilometer-notification-agent" containerID="cri-o://0cf18a43075ff4eec8cd5c514506b996f11104cc9376286a8d819fb5e9afc495" gracePeriod=30
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.525630 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.587420 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.946248 4871 generic.go:334] "Generic (PLEG): container finished" podID="f3b8e510-e73e-443d-af27-1b406415874a" containerID="18f6b63715e969d125aeeadec99e807978fb5fc9911e95a7862cd082f6762b6c" exitCode=0
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.946279 4871 generic.go:334] "Generic (PLEG): container finished" podID="f3b8e510-e73e-443d-af27-1b406415874a" containerID="b1ed857145f01e3ca9774882273148508e62165fcc00122603f56c185228eb67" exitCode=2
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.946327 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b8e510-e73e-443d-af27-1b406415874a","Type":"ContainerDied","Data":"18f6b63715e969d125aeeadec99e807978fb5fc9911e95a7862cd082f6762b6c"}
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.946356 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b8e510-e73e-443d-af27-1b406415874a","Type":"ContainerDied","Data":"b1ed857145f01e3ca9774882273148508e62165fcc00122603f56c185228eb67"}
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.947825 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f" event={"ID":"6d11cc8a-2c3e-421f-a156-0a811156876e","Type":"ContainerStarted","Data":"899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84"}
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.947949 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerName="nova-api-api" containerID="cri-o://6a8eb866dd4b0af4eecabc2bb254bf431804e8795d37e843feb6955d6846d3a4" gracePeriod=30
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.947893 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerName="nova-api-log" containerID="cri-o://cdc03c9544115bd518f766f7e34a3a94318010569fbbd2f2d61f4ac341818776" gracePeriod=30
Nov 26 05:46:47 crc kubenswrapper[4871]: I1126 05:46:47.973830 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f" podStartSLOduration=2.973813001 podStartE2EDuration="2.973813001s" podCreationTimestamp="2025-11-26 05:46:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:47.968754646 +0000 UTC m=+1266.151806232" watchObservedRunningTime="2025-11-26 05:46:47.973813001 +0000 UTC m=+1266.156864587"
Nov 26 05:46:48 crc kubenswrapper[4871]: I1126 05:46:48.982204 4871 generic.go:334] "Generic (PLEG): container finished" podID="f3b8e510-e73e-443d-af27-1b406415874a" containerID="c0f767aef6999be3c85acd6a2f4d15eda4713d46381e221b265cc9ca241a1efa" exitCode=0
Nov 26 05:46:48 crc kubenswrapper[4871]: I1126 05:46:48.982291 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b8e510-e73e-443d-af27-1b406415874a","Type":"ContainerDied","Data":"c0f767aef6999be3c85acd6a2f4d15eda4713d46381e221b265cc9ca241a1efa"}
Nov 26 05:46:48 crc kubenswrapper[4871]: I1126 05:46:48.991594 4871 generic.go:334] "Generic (PLEG): container finished" podID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerID="6a8eb866dd4b0af4eecabc2bb254bf431804e8795d37e843feb6955d6846d3a4" exitCode=0
Nov 26 05:46:48 crc kubenswrapper[4871]: I1126 05:46:48.991629 4871 generic.go:334] "Generic (PLEG): container finished" podID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerID="cdc03c9544115bd518f766f7e34a3a94318010569fbbd2f2d61f4ac341818776" exitCode=143
Nov 26 05:46:48 crc kubenswrapper[4871]: I1126 05:46:48.992436 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"708f8f9f-0552-4e11-a0cc-af29841a4b3b","Type":"ContainerDied","Data":"6a8eb866dd4b0af4eecabc2bb254bf431804e8795d37e843feb6955d6846d3a4"}
Nov 26 05:46:48 crc kubenswrapper[4871]: I1126 05:46:48.992468 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f"
Nov 26 05:46:48 crc kubenswrapper[4871]: I1126 05:46:48.992479 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"708f8f9f-0552-4e11-a0cc-af29841a4b3b","Type":"ContainerDied","Data":"cdc03c9544115bd518f766f7e34a3a94318010569fbbd2f2d61f4ac341818776"}
Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.394928 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.541213 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-combined-ca-bundle\") pod \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") "
Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.541318 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-config-data\") pod \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") "
Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.541380 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/708f8f9f-0552-4e11-a0cc-af29841a4b3b-logs\") pod \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") "
Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.541584 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pc268\" (UniqueName: \"kubernetes.io/projected/708f8f9f-0552-4e11-a0cc-af29841a4b3b-kube-api-access-pc268\") pod \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\" (UID: \"708f8f9f-0552-4e11-a0cc-af29841a4b3b\") "
Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.542991 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/708f8f9f-0552-4e11-a0cc-af29841a4b3b-logs" (OuterVolumeSpecName: "logs") pod "708f8f9f-0552-4e11-a0cc-af29841a4b3b" (UID: "708f8f9f-0552-4e11-a0cc-af29841a4b3b"). InnerVolumeSpecName "logs".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.553936 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/708f8f9f-0552-4e11-a0cc-af29841a4b3b-kube-api-access-pc268" (OuterVolumeSpecName: "kube-api-access-pc268") pod "708f8f9f-0552-4e11-a0cc-af29841a4b3b" (UID: "708f8f9f-0552-4e11-a0cc-af29841a4b3b"). InnerVolumeSpecName "kube-api-access-pc268". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.574675 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-config-data" (OuterVolumeSpecName: "config-data") pod "708f8f9f-0552-4e11-a0cc-af29841a4b3b" (UID: "708f8f9f-0552-4e11-a0cc-af29841a4b3b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.578052 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "708f8f9f-0552-4e11-a0cc-af29841a4b3b" (UID: "708f8f9f-0552-4e11-a0cc-af29841a4b3b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.643637 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pc268\" (UniqueName: \"kubernetes.io/projected/708f8f9f-0552-4e11-a0cc-af29841a4b3b-kube-api-access-pc268\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.643678 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.643688 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/708f8f9f-0552-4e11-a0cc-af29841a4b3b-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:49 crc kubenswrapper[4871]: I1126 05:46:49.643698 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/708f8f9f-0552-4e11-a0cc-af29841a4b3b-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.005660 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"708f8f9f-0552-4e11-a0cc-af29841a4b3b","Type":"ContainerDied","Data":"c513fa7d6dd5f9aee629bf375357376069740541a3ed53de9724554c838d67ec"} Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.005712 4871 scope.go:117] "RemoveContainer" containerID="6a8eb866dd4b0af4eecabc2bb254bf431804e8795d37e843feb6955d6846d3a4" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.005842 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.016852 4871 generic.go:334] "Generic (PLEG): container finished" podID="f3b8e510-e73e-443d-af27-1b406415874a" containerID="0cf18a43075ff4eec8cd5c514506b996f11104cc9376286a8d819fb5e9afc495" exitCode=0 Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.016959 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b8e510-e73e-443d-af27-1b406415874a","Type":"ContainerDied","Data":"0cf18a43075ff4eec8cd5c514506b996f11104cc9376286a8d819fb5e9afc495"} Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.096160 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.119097 4871 scope.go:117] "RemoveContainer" containerID="cdc03c9544115bd518f766f7e34a3a94318010569fbbd2f2d61f4ac341818776" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.125636 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.133172 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.173048 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:50 crc kubenswrapper[4871]: E1126 05:46:50.173617 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerName="nova-api-api" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.173640 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerName="nova-api-api" Nov 26 05:46:50 crc kubenswrapper[4871]: E1126 05:46:50.173660 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="proxy-httpd" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.173668 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="proxy-httpd" Nov 26 05:46:50 crc kubenswrapper[4871]: E1126 05:46:50.173687 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerName="nova-api-log" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.173695 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerName="nova-api-log" Nov 26 05:46:50 crc kubenswrapper[4871]: E1126 05:46:50.173716 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="sg-core" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.173726 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="sg-core" Nov 26 05:46:50 crc kubenswrapper[4871]: E1126 05:46:50.173757 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="ceilometer-central-agent" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.173765 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="ceilometer-central-agent" Nov 26 05:46:50 crc kubenswrapper[4871]: E1126 05:46:50.173788 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3b8e510-e73e-443d-af27-1b406415874a" 
containerName="ceilometer-notification-agent" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.173795 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="ceilometer-notification-agent" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.174023 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="ceilometer-central-agent" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.174052 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerName="nova-api-log" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.174069 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="proxy-httpd" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.174077 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="ceilometer-notification-agent" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.174092 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" containerName="nova-api-api" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.174103 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3b8e510-e73e-443d-af27-1b406415874a" containerName="sg-core" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.175550 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.177025 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.177874 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.178655 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.201906 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.255464 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-combined-ca-bundle\") pod \"f3b8e510-e73e-443d-af27-1b406415874a\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.256218 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-config-data\") pod \"f3b8e510-e73e-443d-af27-1b406415874a\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.256300 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-sg-core-conf-yaml\") pod \"f3b8e510-e73e-443d-af27-1b406415874a\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.256590 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-log-httpd\") pod \"f3b8e510-e73e-443d-af27-1b406415874a\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.256645 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-scripts\") pod \"f3b8e510-e73e-443d-af27-1b406415874a\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.256686 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zmpn\" (UniqueName: \"kubernetes.io/projected/f3b8e510-e73e-443d-af27-1b406415874a-kube-api-access-6zmpn\") pod \"f3b8e510-e73e-443d-af27-1b406415874a\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.256812 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-ceilometer-tls-certs\") pod \"f3b8e510-e73e-443d-af27-1b406415874a\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.256843 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-run-httpd\") pod \"f3b8e510-e73e-443d-af27-1b406415874a\" (UID: \"f3b8e510-e73e-443d-af27-1b406415874a\") " Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.257669 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f3b8e510-e73e-443d-af27-1b406415874a" (UID: "f3b8e510-e73e-443d-af27-1b406415874a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.258203 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f3b8e510-e73e-443d-af27-1b406415874a" (UID: "f3b8e510-e73e-443d-af27-1b406415874a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.262362 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3b8e510-e73e-443d-af27-1b406415874a-kube-api-access-6zmpn" (OuterVolumeSpecName: "kube-api-access-6zmpn") pod "f3b8e510-e73e-443d-af27-1b406415874a" (UID: "f3b8e510-e73e-443d-af27-1b406415874a"). InnerVolumeSpecName "kube-api-access-6zmpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.264390 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-scripts" (OuterVolumeSpecName: "scripts") pod "f3b8e510-e73e-443d-af27-1b406415874a" (UID: "f3b8e510-e73e-443d-af27-1b406415874a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.311197 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f3b8e510-e73e-443d-af27-1b406415874a" (UID: "f3b8e510-e73e-443d-af27-1b406415874a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.315696 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f3b8e510-e73e-443d-af27-1b406415874a" (UID: "f3b8e510-e73e-443d-af27-1b406415874a"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.359272 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-public-tls-certs\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.359511 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-internal-tls-certs\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.359705 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnl2t\" (UniqueName: \"kubernetes.io/projected/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-kube-api-access-tnl2t\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.359863 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-logs\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.359994 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.360120 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-config-data\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.360354 4871 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.360425 4871 reconciler_common.go:293] 
"Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.360518 4871 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.360595 4871 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f3b8e510-e73e-443d-af27-1b406415874a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.360647 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.360727 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zmpn\" (UniqueName: \"kubernetes.io/projected/f3b8e510-e73e-443d-af27-1b406415874a-kube-api-access-6zmpn\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.366367 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f3b8e510-e73e-443d-af27-1b406415874a" (UID: "f3b8e510-e73e-443d-af27-1b406415874a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.386293 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-config-data" (OuterVolumeSpecName: "config-data") pod "f3b8e510-e73e-443d-af27-1b406415874a" (UID: "f3b8e510-e73e-443d-af27-1b406415874a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.462373 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-public-tls-certs\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.463893 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-internal-tls-certs\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.463940 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnl2t\" (UniqueName: \"kubernetes.io/projected/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-kube-api-access-tnl2t\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.464155 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-logs\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.464261 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.464299 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-config-data\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.464457 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.464473 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3b8e510-e73e-443d-af27-1b406415874a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.465311 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-logs\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.466336 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-public-tls-certs\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.467732 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-config-data\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.468235 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-internal-tls-certs\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.469491 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.487456 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnl2t\" (UniqueName: \"kubernetes.io/projected/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-kube-api-access-tnl2t\") pod \"nova-api-0\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.490372 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.530434 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="708f8f9f-0552-4e11-a0cc-af29841a4b3b" path="/var/lib/kubelet/pods/708f8f9f-0552-4e11-a0cc-af29841a4b3b/volumes" Nov 26 05:46:50 crc kubenswrapper[4871]: I1126 05:46:50.969811 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:46:50 crc kubenswrapper[4871]: W1126 05:46:50.980428 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ff1dd07_99f3_4937_b5e7_ce5fd6b72d90.slice/crio-33b854ef2073a86d7daf8c5d36176c02e667aaae248bf65b89d061d309ed7aa7 WatchSource:0}: Error finding container 33b854ef2073a86d7daf8c5d36176c02e667aaae248bf65b89d061d309ed7aa7: Status 404 returned error can't find the container with id 33b854ef2073a86d7daf8c5d36176c02e667aaae248bf65b89d061d309ed7aa7 Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.046181 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f3b8e510-e73e-443d-af27-1b406415874a","Type":"ContainerDied","Data":"c95273ded0c338a999c2fd4c58ce778987f3bc56b10cb36ecb273d92506d37a8"} Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.046276 4871 scope.go:117] "RemoveContainer" containerID="18f6b63715e969d125aeeadec99e807978fb5fc9911e95a7862cd082f6762b6c" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.046206 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.049274 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90","Type":"ContainerStarted","Data":"33b854ef2073a86d7daf8c5d36176c02e667aaae248bf65b89d061d309ed7aa7"} Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.099715 4871 scope.go:117] "RemoveContainer" containerID="b1ed857145f01e3ca9774882273148508e62165fcc00122603f56c185228eb67" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.126062 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.126607 4871 scope.go:117] "RemoveContainer" containerID="0cf18a43075ff4eec8cd5c514506b996f11104cc9376286a8d819fb5e9afc495" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.133061 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.151433 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.154386 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.161045 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.161281 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.161395 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.162443 4871 scope.go:117] "RemoveContainer" containerID="c0f767aef6999be3c85acd6a2f4d15eda4713d46381e221b265cc9ca241a1efa" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.164533 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.285978 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.286286 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-run-httpd\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.286337 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.286450 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-config-data\") pod 
\"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.286478 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-scripts\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.286593 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.286625 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5dzm\" (UniqueName: \"kubernetes.io/projected/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-kube-api-access-h5dzm\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.286690 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-log-httpd\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.388038 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-config-data\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.388079 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-scripts\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.388124 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.388152 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5dzm\" (UniqueName: \"kubernetes.io/projected/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-kube-api-access-h5dzm\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.388172 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-log-httpd\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.389041 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.389453 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-log-httpd\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.389659 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-run-httpd\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.389749 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.390245 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-run-httpd\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.393984 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.394357 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-scripts\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.395171 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-config-data\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.395307 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.397962 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.408100 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5dzm\" (UniqueName: 
\"kubernetes.io/projected/2ffa88fa-bd91-473e-8d4e-44fc61235b3d-kube-api-access-h5dzm\") pod \"ceilometer-0\" (UID: \"2ffa88fa-bd91-473e-8d4e-44fc61235b3d\") " pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.478423 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 05:46:51 crc kubenswrapper[4871]: I1126 05:46:51.959863 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 05:46:51 crc kubenswrapper[4871]: W1126 05:46:51.960708 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ffa88fa_bd91_473e_8d4e_44fc61235b3d.slice/crio-5a0cde4d04c1bea969c9ea1b1d7be1b1af397bf37a7f547c1764f4c05991275c WatchSource:0}: Error finding container 5a0cde4d04c1bea969c9ea1b1d7be1b1af397bf37a7f547c1764f4c05991275c: Status 404 returned error can't find the container with id 5a0cde4d04c1bea969c9ea1b1d7be1b1af397bf37a7f547c1764f4c05991275c Nov 26 05:46:52 crc kubenswrapper[4871]: I1126 05:46:52.063309 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ffa88fa-bd91-473e-8d4e-44fc61235b3d","Type":"ContainerStarted","Data":"5a0cde4d04c1bea969c9ea1b1d7be1b1af397bf37a7f547c1764f4c05991275c"} Nov 26 05:46:52 crc kubenswrapper[4871]: I1126 05:46:52.068429 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90","Type":"ContainerStarted","Data":"c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99"} Nov 26 05:46:52 crc kubenswrapper[4871]: I1126 05:46:52.068496 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90","Type":"ContainerStarted","Data":"8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429"} Nov 26 05:46:52 crc kubenswrapper[4871]: I1126 05:46:52.100764 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.100746274 podStartE2EDuration="2.100746274s" podCreationTimestamp="2025-11-26 05:46:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:52.092313945 +0000 UTC m=+1270.275365531" watchObservedRunningTime="2025-11-26 05:46:52.100746274 +0000 UTC m=+1270.283797860" Nov 26 05:46:52 crc kubenswrapper[4871]: I1126 05:46:52.538476 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3b8e510-e73e-443d-af27-1b406415874a" path="/var/lib/kubelet/pods/f3b8e510-e73e-443d-af27-1b406415874a/volumes" Nov 26 05:46:52 crc kubenswrapper[4871]: I1126 05:46:52.539257 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:52 crc kubenswrapper[4871]: I1126 05:46:52.756115 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.079953 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ffa88fa-bd91-473e-8d4e-44fc61235b3d","Type":"ContainerStarted","Data":"d53c2766c37e15c1b14aedc4b8b4fa37f78026d333602afdf5b412ce6d0c44b8"} Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.080013 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"2ffa88fa-bd91-473e-8d4e-44fc61235b3d","Type":"ContainerStarted","Data":"667e2e4f60fe8f1bb0f8ba2ba29bf3c2608af022dd3e3faa49a5fa05b69ab259"} Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.095285 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.440792 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-qz2zl"] Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.442029 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.444118 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.444340 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.455182 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-qz2zl"] Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.554746 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-config-data\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.555167 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-scripts\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.555332 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvd2t\" (UniqueName: \"kubernetes.io/projected/105ddc2e-1b44-4e32-ba25-1582b633faaa-kube-api-access-lvd2t\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.555386 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.657011 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-config-data\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.657154 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-scripts\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " 
pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.657198 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvd2t\" (UniqueName: \"kubernetes.io/projected/105ddc2e-1b44-4e32-ba25-1582b633faaa-kube-api-access-lvd2t\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.657226 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.661824 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.661893 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-config-data\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.662739 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-scripts\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.683403 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvd2t\" (UniqueName: \"kubernetes.io/projected/105ddc2e-1b44-4e32-ba25-1582b633faaa-kube-api-access-lvd2t\") pod \"nova-cell1-cell-mapping-qz2zl\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") " pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:53 crc kubenswrapper[4871]: I1126 05:46:53.796885 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:46:54 crc kubenswrapper[4871]: I1126 05:46:54.096671 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ffa88fa-bd91-473e-8d4e-44fc61235b3d","Type":"ContainerStarted","Data":"13dd16b195d23aeb129725c5fb00f636252571059ceeacf81c4a9299115c51ed"} Nov 26 05:46:54 crc kubenswrapper[4871]: I1126 05:46:54.334236 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-qz2zl"] Nov 26 05:46:55 crc kubenswrapper[4871]: I1126 05:46:55.108429 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2ffa88fa-bd91-473e-8d4e-44fc61235b3d","Type":"ContainerStarted","Data":"03b3e40e241593fc3031d208ece08ac141997aa52f45900ba0def6cbab12e548"} Nov 26 05:46:55 crc kubenswrapper[4871]: I1126 05:46:55.108753 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 05:46:55 crc kubenswrapper[4871]: I1126 05:46:55.110231 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qz2zl" event={"ID":"105ddc2e-1b44-4e32-ba25-1582b633faaa","Type":"ContainerStarted","Data":"23edf9cdd99ee1bd02e0b63ed593e8d29079a97c3186e93a8486de34adfbd3d4"} Nov 26 05:46:55 crc kubenswrapper[4871]: I1126 05:46:55.110278 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qz2zl" event={"ID":"105ddc2e-1b44-4e32-ba25-1582b633faaa","Type":"ContainerStarted","Data":"64d5a0212d3514650a2d30d52a2399cd6b82d30200420caf078cc7a521def498"} Nov 26 05:46:55 crc kubenswrapper[4871]: I1126 05:46:55.137296 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.556502988 podStartE2EDuration="4.137280031s" podCreationTimestamp="2025-11-26 05:46:51 +0000 UTC" firstStartedPulling="2025-11-26 05:46:51.96463413 +0000 UTC m=+1270.147685716" lastFinishedPulling="2025-11-26 05:46:54.545411163 +0000 UTC m=+1272.728462759" observedRunningTime="2025-11-26 05:46:55.132710848 +0000 UTC m=+1273.315762444" watchObservedRunningTime="2025-11-26 05:46:55.137280031 +0000 UTC m=+1273.320331607" Nov 26 05:46:55 crc kubenswrapper[4871]: I1126 05:46:55.161137 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-qz2zl" podStartSLOduration=2.161111252 podStartE2EDuration="2.161111252s" podCreationTimestamp="2025-11-26 05:46:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:46:55.150994691 +0000 UTC m=+1273.334046277" watchObservedRunningTime="2025-11-26 05:46:55.161111252 +0000 UTC m=+1273.344162858" Nov 26 05:46:55 crc kubenswrapper[4871]: I1126 05:46:55.479255 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f" Nov 26 05:46:55 crc kubenswrapper[4871]: I1126 05:46:55.535764 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-844fc57f6f-g4x5h"] Nov 26 05:46:55 crc kubenswrapper[4871]: I1126 05:46:55.536274 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" podUID="aac7a468-053c-4c4e-a3fb-99fc89d3e939" containerName="dnsmasq-dns" containerID="cri-o://7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6" gracePeriod=10 Nov 26 05:46:56 crc kubenswrapper[4871]: 
I1126 05:46:56.093166 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.144811 4871 generic.go:334] "Generic (PLEG): container finished" podID="aac7a468-053c-4c4e-a3fb-99fc89d3e939" containerID="7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6" exitCode=0 Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.147037 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.147050 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" event={"ID":"aac7a468-053c-4c4e-a3fb-99fc89d3e939","Type":"ContainerDied","Data":"7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6"} Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.151389 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-844fc57f6f-g4x5h" event={"ID":"aac7a468-053c-4c4e-a3fb-99fc89d3e939","Type":"ContainerDied","Data":"df56ef4de10d376214174df5322c184990c85fa187bf1355daed7fd0e910afcb"} Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.151491 4871 scope.go:117] "RemoveContainer" containerID="7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.182053 4871 scope.go:117] "RemoveContainer" containerID="293687b83c927fbe9c34ec5a70081a1e16fcf9eda30a0ce18d9304e2e7d98427" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.212151 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-sb\") pod \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.212354 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nz2v\" (UniqueName: \"kubernetes.io/projected/aac7a468-053c-4c4e-a3fb-99fc89d3e939-kube-api-access-9nz2v\") pod \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.212655 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-svc\") pod \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.212972 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-nb\") pod \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.213130 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-swift-storage-0\") pod \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.213219 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-config\") pod \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\" (UID: \"aac7a468-053c-4c4e-a3fb-99fc89d3e939\") " Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.238753 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aac7a468-053c-4c4e-a3fb-99fc89d3e939-kube-api-access-9nz2v" (OuterVolumeSpecName: "kube-api-access-9nz2v") pod "aac7a468-053c-4c4e-a3fb-99fc89d3e939" (UID: "aac7a468-053c-4c4e-a3fb-99fc89d3e939"). InnerVolumeSpecName "kube-api-access-9nz2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.297701 4871 scope.go:117] "RemoveContainer" containerID="7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6" Nov 26 05:46:56 crc kubenswrapper[4871]: E1126 05:46:56.300975 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6\": container with ID starting with 7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6 not found: ID does not exist" containerID="7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.301016 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6"} err="failed to get container status \"7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6\": rpc error: code = NotFound desc = could not find container \"7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6\": container with ID starting with 7b88565df9685b8bba28d6fbee6f0a441ed4cfbe71e148f3e663d00c29a3a6b6 not found: ID does not exist" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.301039 4871 scope.go:117] "RemoveContainer" containerID="293687b83c927fbe9c34ec5a70081a1e16fcf9eda30a0ce18d9304e2e7d98427" Nov 26 05:46:56 crc kubenswrapper[4871]: E1126 05:46:56.302241 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"293687b83c927fbe9c34ec5a70081a1e16fcf9eda30a0ce18d9304e2e7d98427\": container with ID starting with 293687b83c927fbe9c34ec5a70081a1e16fcf9eda30a0ce18d9304e2e7d98427 not found: ID does not exist" containerID="293687b83c927fbe9c34ec5a70081a1e16fcf9eda30a0ce18d9304e2e7d98427" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.302265 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"293687b83c927fbe9c34ec5a70081a1e16fcf9eda30a0ce18d9304e2e7d98427"} err="failed to get container status \"293687b83c927fbe9c34ec5a70081a1e16fcf9eda30a0ce18d9304e2e7d98427\": rpc error: code = NotFound desc = could not find container \"293687b83c927fbe9c34ec5a70081a1e16fcf9eda30a0ce18d9304e2e7d98427\": container with ID starting with 293687b83c927fbe9c34ec5a70081a1e16fcf9eda30a0ce18d9304e2e7d98427 not found: ID does not exist" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.315314 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nz2v\" (UniqueName: \"kubernetes.io/projected/aac7a468-053c-4c4e-a3fb-99fc89d3e939-kube-api-access-9nz2v\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.323884 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "aac7a468-053c-4c4e-a3fb-99fc89d3e939" (UID: "aac7a468-053c-4c4e-a3fb-99fc89d3e939"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.333805 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-config" (OuterVolumeSpecName: "config") pod "aac7a468-053c-4c4e-a3fb-99fc89d3e939" (UID: "aac7a468-053c-4c4e-a3fb-99fc89d3e939"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.357010 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aac7a468-053c-4c4e-a3fb-99fc89d3e939" (UID: "aac7a468-053c-4c4e-a3fb-99fc89d3e939"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.373778 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "aac7a468-053c-4c4e-a3fb-99fc89d3e939" (UID: "aac7a468-053c-4c4e-a3fb-99fc89d3e939"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.390045 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "aac7a468-053c-4c4e-a3fb-99fc89d3e939" (UID: "aac7a468-053c-4c4e-a3fb-99fc89d3e939"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.417049 4871 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.417078 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.417088 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.417097 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.417105 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aac7a468-053c-4c4e-a3fb-99fc89d3e939-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.478094 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-844fc57f6f-g4x5h"] Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.488266 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-844fc57f6f-g4x5h"] Nov 26 05:46:56 crc kubenswrapper[4871]: I1126 05:46:56.519012 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aac7a468-053c-4c4e-a3fb-99fc89d3e939" path="/var/lib/kubelet/pods/aac7a468-053c-4c4e-a3fb-99fc89d3e939/volumes" Nov 26 05:47:00 crc kubenswrapper[4871]: I1126 05:47:00.197302 4871 generic.go:334] "Generic (PLEG): container finished" podID="105ddc2e-1b44-4e32-ba25-1582b633faaa" containerID="23edf9cdd99ee1bd02e0b63ed593e8d29079a97c3186e93a8486de34adfbd3d4" exitCode=0 Nov 26 05:47:00 crc kubenswrapper[4871]: I1126 05:47:00.197426 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qz2zl" event={"ID":"105ddc2e-1b44-4e32-ba25-1582b633faaa","Type":"ContainerDied","Data":"23edf9cdd99ee1bd02e0b63ed593e8d29079a97c3186e93a8486de34adfbd3d4"} Nov 26 05:47:00 crc kubenswrapper[4871]: I1126 05:47:00.491144 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 05:47:00 crc kubenswrapper[4871]: I1126 05:47:00.491230 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.509709 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.222:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.509941 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.222:8774/\": net/http: request canceled (Client.Timeout 
Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.509941 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.222:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.650771 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qz2zl"
Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.742293 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-config-data\") pod \"105ddc2e-1b44-4e32-ba25-1582b633faaa\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") "
Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.742402 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvd2t\" (UniqueName: \"kubernetes.io/projected/105ddc2e-1b44-4e32-ba25-1582b633faaa-kube-api-access-lvd2t\") pod \"105ddc2e-1b44-4e32-ba25-1582b633faaa\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") "
Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.742616 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-scripts\") pod \"105ddc2e-1b44-4e32-ba25-1582b633faaa\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") "
Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.742725 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-combined-ca-bundle\") pod \"105ddc2e-1b44-4e32-ba25-1582b633faaa\" (UID: \"105ddc2e-1b44-4e32-ba25-1582b633faaa\") "
Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.750139 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-scripts" (OuterVolumeSpecName: "scripts") pod "105ddc2e-1b44-4e32-ba25-1582b633faaa" (UID: "105ddc2e-1b44-4e32-ba25-1582b633faaa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.754768 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/105ddc2e-1b44-4e32-ba25-1582b633faaa-kube-api-access-lvd2t" (OuterVolumeSpecName: "kube-api-access-lvd2t") pod "105ddc2e-1b44-4e32-ba25-1582b633faaa" (UID: "105ddc2e-1b44-4e32-ba25-1582b633faaa"). InnerVolumeSpecName "kube-api-access-lvd2t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.776241 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "105ddc2e-1b44-4e32-ba25-1582b633faaa" (UID: "105ddc2e-1b44-4e32-ba25-1582b633faaa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
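Behind these entries is the kubelet's volume manager driving actual state toward desired state: once the finished job pod is deleted, its volumes are no longer desired, so each still-mounted volume is torn down and then reported detached. A toy version of that loop (the types are invented; the real reconciler lives in the kubelet's volumemanager and performs plugin-specific teardown between the two log lines):

    package main

    import "fmt"

    type podVolume struct{ pod, volume string }

    // reconcile unmounts anything mounted that is no longer desired, mirroring
    // the "UnmountVolume started" -> TearDown -> "Volume detached" sequence.
    func reconcile(desired, mounted map[podVolume]bool) {
    	for pv := range mounted {
    		if !desired[pv] {
    			fmt.Printf("UnmountVolume started for volume %q pod %q\n", pv.volume, pv.pod)
    			// ...plugin TearDown would run here...
    			delete(mounted, pv)
    			fmt.Printf("Volume detached for volume %q\n", pv.volume)
    		}
    	}
    }

    func main() {
    	mounted := map[podVolume]bool{
    		{"105ddc2e", "config-data"}: true,
    		{"105ddc2e", "scripts"}:     true,
    	}
    	reconcile(map[podVolume]bool{}, mounted) // pod deleted: nothing is desired
    }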
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.845593 4871 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.845625 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.845636 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/105ddc2e-1b44-4e32-ba25-1582b633faaa-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:01 crc kubenswrapper[4871]: I1126 05:47:01.845645 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvd2t\" (UniqueName: \"kubernetes.io/projected/105ddc2e-1b44-4e32-ba25-1582b633faaa-kube-api-access-lvd2t\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:02 crc kubenswrapper[4871]: I1126 05:47:02.232989 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qz2zl" event={"ID":"105ddc2e-1b44-4e32-ba25-1582b633faaa","Type":"ContainerDied","Data":"64d5a0212d3514650a2d30d52a2399cd6b82d30200420caf078cc7a521def498"} Nov 26 05:47:02 crc kubenswrapper[4871]: I1126 05:47:02.233043 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64d5a0212d3514650a2d30d52a2399cd6b82d30200420caf078cc7a521def498" Nov 26 05:47:02 crc kubenswrapper[4871]: I1126 05:47:02.233124 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qz2zl" Nov 26 05:47:02 crc kubenswrapper[4871]: I1126 05:47:02.423459 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:47:02 crc kubenswrapper[4871]: I1126 05:47:02.423879 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerName="nova-api-log" containerID="cri-o://8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429" gracePeriod=30 Nov 26 05:47:02 crc kubenswrapper[4871]: I1126 05:47:02.423988 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerName="nova-api-api" containerID="cri-o://c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99" gracePeriod=30 Nov 26 05:47:02 crc kubenswrapper[4871]: I1126 05:47:02.445021 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:47:02 crc kubenswrapper[4871]: I1126 05:47:02.445288 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="216f8390-d77e-4760-af6d-838c7f7eb057" containerName="nova-scheduler-scheduler" containerID="cri-o://4e1e7b36b71431ecaf074304bd6bd5eb7a3f2e89f2384539f419d16b59fa9d1d" gracePeriod=30 Nov 26 05:47:02 crc kubenswrapper[4871]: I1126 05:47:02.468628 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:47:02 crc kubenswrapper[4871]: I1126 05:47:02.468885 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9af59634-8618-4934-8a71-606bcde10c43" 
containerName="nova-metadata-log" containerID="cri-o://f875ba7d40b8e668c71b071714d435ca9aef02c364572f25828337bd34d78636" gracePeriod=30 Nov 26 05:47:02 crc kubenswrapper[4871]: I1126 05:47:02.468992 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9af59634-8618-4934-8a71-606bcde10c43" containerName="nova-metadata-metadata" containerID="cri-o://5ac1d3eed6816da396a8d7235c2650aa398b83b8ab2e071bba92aa0736c529cd" gracePeriod=30 Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.248188 4871 generic.go:334] "Generic (PLEG): container finished" podID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerID="8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429" exitCode=143 Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.248258 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90","Type":"ContainerDied","Data":"8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429"} Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.253042 4871 generic.go:334] "Generic (PLEG): container finished" podID="216f8390-d77e-4760-af6d-838c7f7eb057" containerID="4e1e7b36b71431ecaf074304bd6bd5eb7a3f2e89f2384539f419d16b59fa9d1d" exitCode=0 Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.253131 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"216f8390-d77e-4760-af6d-838c7f7eb057","Type":"ContainerDied","Data":"4e1e7b36b71431ecaf074304bd6bd5eb7a3f2e89f2384539f419d16b59fa9d1d"} Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.254427 4871 generic.go:334] "Generic (PLEG): container finished" podID="9af59634-8618-4934-8a71-606bcde10c43" containerID="f875ba7d40b8e668c71b071714d435ca9aef02c364572f25828337bd34d78636" exitCode=143 Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.254450 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9af59634-8618-4934-8a71-606bcde10c43","Type":"ContainerDied","Data":"f875ba7d40b8e668c71b071714d435ca9aef02c364572f25828337bd34d78636"} Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.663228 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.803156 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-config-data\") pod \"216f8390-d77e-4760-af6d-838c7f7eb057\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.803364 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqnft\" (UniqueName: \"kubernetes.io/projected/216f8390-d77e-4760-af6d-838c7f7eb057-kube-api-access-fqnft\") pod \"216f8390-d77e-4760-af6d-838c7f7eb057\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.803501 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-combined-ca-bundle\") pod \"216f8390-d77e-4760-af6d-838c7f7eb057\" (UID: \"216f8390-d77e-4760-af6d-838c7f7eb057\") " Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.875295 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/216f8390-d77e-4760-af6d-838c7f7eb057-kube-api-access-fqnft" (OuterVolumeSpecName: "kube-api-access-fqnft") pod "216f8390-d77e-4760-af6d-838c7f7eb057" (UID: "216f8390-d77e-4760-af6d-838c7f7eb057"). InnerVolumeSpecName "kube-api-access-fqnft". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.883265 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-config-data" (OuterVolumeSpecName: "config-data") pod "216f8390-d77e-4760-af6d-838c7f7eb057" (UID: "216f8390-d77e-4760-af6d-838c7f7eb057"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.909612 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.909645 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqnft\" (UniqueName: \"kubernetes.io/projected/216f8390-d77e-4760-af6d-838c7f7eb057-kube-api-access-fqnft\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:03 crc kubenswrapper[4871]: I1126 05:47:03.909731 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "216f8390-d77e-4760-af6d-838c7f7eb057" (UID: "216f8390-d77e-4760-af6d-838c7f7eb057"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.012357 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/216f8390-d77e-4760-af6d-838c7f7eb057-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.267240 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"216f8390-d77e-4760-af6d-838c7f7eb057","Type":"ContainerDied","Data":"ea942ca8b71c481ae43516385bdd8b103eacd9d644aadbfb6e980bb5c6327635"} Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.267285 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.267301 4871 scope.go:117] "RemoveContainer" containerID="4e1e7b36b71431ecaf074304bd6bd5eb7a3f2e89f2384539f419d16b59fa9d1d" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.271016 4871 generic.go:334] "Generic (PLEG): container finished" podID="9af59634-8618-4934-8a71-606bcde10c43" containerID="5ac1d3eed6816da396a8d7235c2650aa398b83b8ab2e071bba92aa0736c529cd" exitCode=0 Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.271052 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9af59634-8618-4934-8a71-606bcde10c43","Type":"ContainerDied","Data":"5ac1d3eed6816da396a8d7235c2650aa398b83b8ab2e071bba92aa0736c529cd"} Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.271072 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9af59634-8618-4934-8a71-606bcde10c43","Type":"ContainerDied","Data":"42d05905e92f2befc9b3ec3dc89e6a3fe8598cd46dfc468f374cbae02e7013fd"} Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.271084 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42d05905e92f2befc9b3ec3dc89e6a3fe8598cd46dfc468f374cbae02e7013fd" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.314971 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.345604 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.365615 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.393903 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:47:04 crc kubenswrapper[4871]: E1126 05:47:04.394365 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9af59634-8618-4934-8a71-606bcde10c43" containerName="nova-metadata-log" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.394382 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9af59634-8618-4934-8a71-606bcde10c43" containerName="nova-metadata-log" Nov 26 05:47:04 crc kubenswrapper[4871]: E1126 05:47:04.394396 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9af59634-8618-4934-8a71-606bcde10c43" containerName="nova-metadata-metadata" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.394402 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9af59634-8618-4934-8a71-606bcde10c43" containerName="nova-metadata-metadata" Nov 26 05:47:04 crc kubenswrapper[4871]: E1126 05:47:04.394417 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aac7a468-053c-4c4e-a3fb-99fc89d3e939" containerName="dnsmasq-dns" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.394426 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="aac7a468-053c-4c4e-a3fb-99fc89d3e939" containerName="dnsmasq-dns" Nov 26 05:47:04 crc kubenswrapper[4871]: E1126 05:47:04.394437 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aac7a468-053c-4c4e-a3fb-99fc89d3e939" containerName="init" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.394482 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="aac7a468-053c-4c4e-a3fb-99fc89d3e939" containerName="init" Nov 26 05:47:04 crc kubenswrapper[4871]: E1126 05:47:04.394511 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="216f8390-d77e-4760-af6d-838c7f7eb057" containerName="nova-scheduler-scheduler" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.394518 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="216f8390-d77e-4760-af6d-838c7f7eb057" containerName="nova-scheduler-scheduler" Nov 26 05:47:04 crc kubenswrapper[4871]: E1126 05:47:04.394780 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="105ddc2e-1b44-4e32-ba25-1582b633faaa" containerName="nova-manage" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.394790 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="105ddc2e-1b44-4e32-ba25-1582b633faaa" containerName="nova-manage" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.395104 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="105ddc2e-1b44-4e32-ba25-1582b633faaa" containerName="nova-manage" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.395125 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="aac7a468-053c-4c4e-a3fb-99fc89d3e939" containerName="dnsmasq-dns" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.395147 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9af59634-8618-4934-8a71-606bcde10c43" containerName="nova-metadata-log" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 
Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.395159 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="216f8390-d77e-4760-af6d-838c7f7eb057" containerName="nova-scheduler-scheduler"
Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.395165 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9af59634-8618-4934-8a71-606bcde10c43" containerName="nova-metadata-metadata"
Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.395839 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.401762 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.402175 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.421225 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4bk4\" (UniqueName: \"kubernetes.io/projected/9af59634-8618-4934-8a71-606bcde10c43-kube-api-access-k4bk4\") pod \"9af59634-8618-4934-8a71-606bcde10c43\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") "
Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.421367 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-combined-ca-bundle\") pod \"9af59634-8618-4934-8a71-606bcde10c43\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") "
Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.421412 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9af59634-8618-4934-8a71-606bcde10c43-logs\") pod \"9af59634-8618-4934-8a71-606bcde10c43\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") "
Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.421457 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-config-data\") pod \"9af59634-8618-4934-8a71-606bcde10c43\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") "
Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.421575 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-nova-metadata-tls-certs\") pod \"9af59634-8618-4934-8a71-606bcde10c43\" (UID: \"9af59634-8618-4934-8a71-606bcde10c43\") "
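"Caches populated for *v1.Secret" comes from client-go's reflector once its initial list-and-watch has filled the local store; here it fires for the new pod's config secret. The same machinery driven explicitly through a shared informer factory looks like this (a generic client-go sketch, not the kubelet's actual secret-manager wiring; in-cluster config is assumed):

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"k8s.io/client-go/informers"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    	"k8s.io/client-go/tools/cache"
    )

    func main() {
    	cfg, err := rest.InClusterConfig() // assumed environment
    	if err != nil {
    		panic(err)
    	}
    	client := kubernetes.NewForConfigOrDie(cfg)

    	ctx, cancel := context.WithCancel(context.Background())
    	defer cancel()

    	// Watch Secrets in the namespace this log is about.
    	factory := informers.NewSharedInformerFactoryWithOptions(
    		client, 10*time.Minute, informers.WithNamespace("openstack"))
    	secrets := factory.Core().V1().Secrets().Informer()

    	factory.Start(ctx.Done())
    	// Blocks until the reflector's cache is filled, the moment at which
    	// a "Caches populated for *v1.Secret" style message applies.
    	if !cache.WaitForCacheSync(ctx.Done(), secrets.HasSynced) {
    		panic("cache never synced")
    	}
    	fmt.Println("secret cache populated")
    }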
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.422818 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9af59634-8618-4934-8a71-606bcde10c43-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.425514 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9af59634-8618-4934-8a71-606bcde10c43-kube-api-access-k4bk4" (OuterVolumeSpecName: "kube-api-access-k4bk4") pod "9af59634-8618-4934-8a71-606bcde10c43" (UID: "9af59634-8618-4934-8a71-606bcde10c43"). InnerVolumeSpecName "kube-api-access-k4bk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.449799 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-config-data" (OuterVolumeSpecName: "config-data") pod "9af59634-8618-4934-8a71-606bcde10c43" (UID: "9af59634-8618-4934-8a71-606bcde10c43"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.451297 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9af59634-8618-4934-8a71-606bcde10c43" (UID: "9af59634-8618-4934-8a71-606bcde10c43"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.496216 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "9af59634-8618-4934-8a71-606bcde10c43" (UID: "9af59634-8618-4934-8a71-606bcde10c43"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.519286 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="216f8390-d77e-4760-af6d-838c7f7eb057" path="/var/lib/kubelet/pods/216f8390-d77e-4760-af6d-838c7f7eb057/volumes" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.524247 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f01c4d-ea3c-4e99-ba8a-e31d9628307b-config-data\") pod \"nova-scheduler-0\" (UID: \"27f01c4d-ea3c-4e99-ba8a-e31d9628307b\") " pod="openstack/nova-scheduler-0" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.524990 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79swr\" (UniqueName: \"kubernetes.io/projected/27f01c4d-ea3c-4e99-ba8a-e31d9628307b-kube-api-access-79swr\") pod \"nova-scheduler-0\" (UID: \"27f01c4d-ea3c-4e99-ba8a-e31d9628307b\") " pod="openstack/nova-scheduler-0" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.525097 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f01c4d-ea3c-4e99-ba8a-e31d9628307b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"27f01c4d-ea3c-4e99-ba8a-e31d9628307b\") " pod="openstack/nova-scheduler-0" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.525217 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.525240 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.525250 4871 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9af59634-8618-4934-8a71-606bcde10c43-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.525264 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4bk4\" (UniqueName: \"kubernetes.io/projected/9af59634-8618-4934-8a71-606bcde10c43-kube-api-access-k4bk4\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.627203 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f01c4d-ea3c-4e99-ba8a-e31d9628307b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"27f01c4d-ea3c-4e99-ba8a-e31d9628307b\") " pod="openstack/nova-scheduler-0" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.627330 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f01c4d-ea3c-4e99-ba8a-e31d9628307b-config-data\") pod \"nova-scheduler-0\" (UID: \"27f01c4d-ea3c-4e99-ba8a-e31d9628307b\") " pod="openstack/nova-scheduler-0" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.627387 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79swr\" (UniqueName: 
\"kubernetes.io/projected/27f01c4d-ea3c-4e99-ba8a-e31d9628307b-kube-api-access-79swr\") pod \"nova-scheduler-0\" (UID: \"27f01c4d-ea3c-4e99-ba8a-e31d9628307b\") " pod="openstack/nova-scheduler-0" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.632122 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27f01c4d-ea3c-4e99-ba8a-e31d9628307b-config-data\") pod \"nova-scheduler-0\" (UID: \"27f01c4d-ea3c-4e99-ba8a-e31d9628307b\") " pod="openstack/nova-scheduler-0" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.633671 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27f01c4d-ea3c-4e99-ba8a-e31d9628307b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"27f01c4d-ea3c-4e99-ba8a-e31d9628307b\") " pod="openstack/nova-scheduler-0" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.644146 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79swr\" (UniqueName: \"kubernetes.io/projected/27f01c4d-ea3c-4e99-ba8a-e31d9628307b-kube-api-access-79swr\") pod \"nova-scheduler-0\" (UID: \"27f01c4d-ea3c-4e99-ba8a-e31d9628307b\") " pod="openstack/nova-scheduler-0" Nov 26 05:47:04 crc kubenswrapper[4871]: I1126 05:47:04.722244 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.200617 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.292646 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.292659 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"27f01c4d-ea3c-4e99-ba8a-e31d9628307b","Type":"ContainerStarted","Data":"74c45ad5e92b91b40e719efeb2eaf0ad8978042f102708793fc02460757f6ae5"} Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.343878 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.360921 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.377849 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.381722 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.383350 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.384820 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.387418 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.558800 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8636384-aac2-4fd2-8f51-5cd6ca47c362-config-data\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.558862 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgx9p\" (UniqueName: \"kubernetes.io/projected/d8636384-aac2-4fd2-8f51-5cd6ca47c362-kube-api-access-qgx9p\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.559074 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8636384-aac2-4fd2-8f51-5cd6ca47c362-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.559136 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8636384-aac2-4fd2-8f51-5cd6ca47c362-logs\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.559163 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8636384-aac2-4fd2-8f51-5cd6ca47c362-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.661191 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8636384-aac2-4fd2-8f51-5cd6ca47c362-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.661272 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8636384-aac2-4fd2-8f51-5cd6ca47c362-logs\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.661296 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8636384-aac2-4fd2-8f51-5cd6ca47c362-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " 
pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.661386 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8636384-aac2-4fd2-8f51-5cd6ca47c362-config-data\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.661421 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgx9p\" (UniqueName: \"kubernetes.io/projected/d8636384-aac2-4fd2-8f51-5cd6ca47c362-kube-api-access-qgx9p\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.662079 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8636384-aac2-4fd2-8f51-5cd6ca47c362-logs\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.666194 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8636384-aac2-4fd2-8f51-5cd6ca47c362-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.667836 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8636384-aac2-4fd2-8f51-5cd6ca47c362-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.668929 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8636384-aac2-4fd2-8f51-5cd6ca47c362-config-data\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.699042 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgx9p\" (UniqueName: \"kubernetes.io/projected/d8636384-aac2-4fd2-8f51-5cd6ca47c362-kube-api-access-qgx9p\") pod \"nova-metadata-0\" (UID: \"d8636384-aac2-4fd2-8f51-5cd6ca47c362\") " pod="openstack/nova-metadata-0" Nov 26 05:47:05 crc kubenswrapper[4871]: I1126 05:47:05.999326 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.156708 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.275338 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-config-data\") pod \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.275455 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-combined-ca-bundle\") pod \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.275483 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-logs\") pod \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.275636 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-internal-tls-certs\") pod \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.275689 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnl2t\" (UniqueName: \"kubernetes.io/projected/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-kube-api-access-tnl2t\") pod \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.275739 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-public-tls-certs\") pod \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\" (UID: \"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90\") " Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.276884 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-logs" (OuterVolumeSpecName: "logs") pod "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" (UID: "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.283472 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-kube-api-access-tnl2t" (OuterVolumeSpecName: "kube-api-access-tnl2t") pod "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" (UID: "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90"). InnerVolumeSpecName "kube-api-access-tnl2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.313830 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" (UID: "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.315193 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"27f01c4d-ea3c-4e99-ba8a-e31d9628307b","Type":"ContainerStarted","Data":"e56e827dd07752bead4640fcb310b70bffb210d5cb212bcff02bffe159163b3e"} Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.316723 4871 generic.go:334] "Generic (PLEG): container finished" podID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerID="c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99" exitCode=0 Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.316768 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90","Type":"ContainerDied","Data":"c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99"} Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.316797 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90","Type":"ContainerDied","Data":"33b854ef2073a86d7daf8c5d36176c02e667aaae248bf65b89d061d309ed7aa7"} Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.316814 4871 scope.go:117] "RemoveContainer" containerID="c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.316859 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.328444 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-config-data" (OuterVolumeSpecName: "config-data") pod "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" (UID: "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.337614 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.33759301 podStartE2EDuration="2.33759301s" podCreationTimestamp="2025-11-26 05:47:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:47:06.330705589 +0000 UTC m=+1284.513757165" watchObservedRunningTime="2025-11-26 05:47:06.33759301 +0000 UTC m=+1284.520644606" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.348085 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" (UID: "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.354716 4871 scope.go:117] "RemoveContainer" containerID="8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.382471 4871 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.382507 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnl2t\" (UniqueName: \"kubernetes.io/projected/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-kube-api-access-tnl2t\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.382518 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.382542 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.382551 4871 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-logs\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.383833 4871 scope.go:117] "RemoveContainer" containerID="c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99" Nov 26 05:47:06 crc kubenswrapper[4871]: E1126 05:47:06.384164 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99\": container with ID starting with c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99 not found: ID does not exist" containerID="c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.384198 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99"} err="failed to get container status \"c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99\": rpc error: code = NotFound desc = could not find container \"c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99\": container with ID starting with c901bc55cad9de45770aa509634adf322098105ef0348ee591bd5f59a4997b99 not found: ID does not exist" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.384216 4871 scope.go:117] "RemoveContainer" containerID="8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429" Nov 26 05:47:06 crc kubenswrapper[4871]: E1126 05:47:06.384425 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429\": container with ID starting with 8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429 not found: ID does not exist" containerID="8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.384449 4871 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429"} err="failed to get container status \"8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429\": rpc error: code = NotFound desc = could not find container \"8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429\": container with ID starting with 8f69a2427f6e4f595c97fb1ac6b9321df4fa77d361273dc0c25c360715ee6429 not found: ID does not exist" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.386090 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" (UID: "2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.485327 4871 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.489702 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 05:47:06 crc kubenswrapper[4871]: W1126 05:47:06.492761 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8636384_aac2_4fd2_8f51_5cd6ca47c362.slice/crio-709c8609cf7620804c1a0b0ba046ba980095dfa3d51e284028be1efee2cce8b1 WatchSource:0}: Error finding container 709c8609cf7620804c1a0b0ba046ba980095dfa3d51e284028be1efee2cce8b1: Status 404 returned error can't find the container with id 709c8609cf7620804c1a0b0ba046ba980095dfa3d51e284028be1efee2cce8b1 Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.532361 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9af59634-8618-4934-8a71-606bcde10c43" path="/var/lib/kubelet/pods/9af59634-8618-4934-8a71-606bcde10c43/volumes" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.644222 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.668416 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.686984 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 05:47:06 crc kubenswrapper[4871]: E1126 05:47:06.687733 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerName="nova-api-api" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.687833 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerName="nova-api-api" Nov 26 05:47:06 crc kubenswrapper[4871]: E1126 05:47:06.687917 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerName="nova-api-log" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.688013 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerName="nova-api-log" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.689631 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" 
containerName="nova-api-api" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.689675 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" containerName="nova-api-log" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.691187 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.691287 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.693704 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.693828 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.693976 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.792249 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-config-data\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.792701 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.792783 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0049ce2-17f9-4372-a66e-7c03a3763460-logs\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.792806 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-public-tls-certs\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.792853 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z86zz\" (UniqueName: \"kubernetes.io/projected/b0049ce2-17f9-4372-a66e-7c03a3763460-kube-api-access-z86zz\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.792877 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.894151 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-config-data\") pod \"nova-api-0\" (UID: 
\"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.894257 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.894397 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0049ce2-17f9-4372-a66e-7c03a3763460-logs\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.894455 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-public-tls-certs\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.894564 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z86zz\" (UniqueName: \"kubernetes.io/projected/b0049ce2-17f9-4372-a66e-7c03a3763460-kube-api-access-z86zz\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.894623 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.899416 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.901356 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.901633 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b0049ce2-17f9-4372-a66e-7c03a3763460-logs\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.903091 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-public-tls-certs\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.909278 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0049ce2-17f9-4372-a66e-7c03a3763460-config-data\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0" Nov 26 
Nov 26 05:47:06 crc kubenswrapper[4871]: I1126 05:47:06.916267 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z86zz\" (UniqueName: \"kubernetes.io/projected/b0049ce2-17f9-4372-a66e-7c03a3763460-kube-api-access-z86zz\") pod \"nova-api-0\" (UID: \"b0049ce2-17f9-4372-a66e-7c03a3763460\") " pod="openstack/nova-api-0"
Nov 26 05:47:07 crc kubenswrapper[4871]: I1126 05:47:07.013211 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 26 05:47:07 crc kubenswrapper[4871]: I1126 05:47:07.343680 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d8636384-aac2-4fd2-8f51-5cd6ca47c362","Type":"ContainerStarted","Data":"dc2da8e89f8e796e10352a953bc8692e6ba73b5f2f9d84a265c5821d9879c5b2"}
Nov 26 05:47:07 crc kubenswrapper[4871]: I1126 05:47:07.343923 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d8636384-aac2-4fd2-8f51-5cd6ca47c362","Type":"ContainerStarted","Data":"49aaa84226953935a977da918b9f55acc21a28bf04d5cd4282dbb8610173d8e1"}
Nov 26 05:47:07 crc kubenswrapper[4871]: I1126 05:47:07.343935 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d8636384-aac2-4fd2-8f51-5cd6ca47c362","Type":"ContainerStarted","Data":"709c8609cf7620804c1a0b0ba046ba980095dfa3d51e284028be1efee2cce8b1"}
Nov 26 05:47:07 crc kubenswrapper[4871]: I1126 05:47:07.374099 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.374082369 podStartE2EDuration="2.374082369s" podCreationTimestamp="2025-11-26 05:47:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:47:07.37010398 +0000 UTC m=+1285.553155586" watchObservedRunningTime="2025-11-26 05:47:07.374082369 +0000 UTC m=+1285.557133955"
Nov 26 05:47:07 crc kubenswrapper[4871]: I1126 05:47:07.550790 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 26 05:47:08 crc kubenswrapper[4871]: I1126 05:47:08.353391 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b0049ce2-17f9-4372-a66e-7c03a3763460","Type":"ContainerStarted","Data":"469747874d585a5e2b87b660437c161678cfce94369d3a68aea22289ad9810ce"}
Nov 26 05:47:08 crc kubenswrapper[4871]: I1126 05:47:08.354003 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b0049ce2-17f9-4372-a66e-7c03a3763460","Type":"ContainerStarted","Data":"4d542a3f52007dd8a82fe381ae1cc21b7b1f9115eda470ed7338783c9ed5758f"}
Nov 26 05:47:08 crc kubenswrapper[4871]: I1126 05:47:08.354028 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b0049ce2-17f9-4372-a66e-7c03a3763460","Type":"ContainerStarted","Data":"0d17053ccb9fba2fb18fddb6ec9134360d9916c72e8f06a3c3f0ba2cd35e442f"}
Nov 26 05:47:08 crc kubenswrapper[4871]: I1126 05:47:08.377693 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.377670882 podStartE2EDuration="2.377670882s" podCreationTimestamp="2025-11-26 05:47:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:47:08.372570385 +0000 UTC m=+1286.555621991" watchObservedRunningTime="2025-11-26 05:47:08.377670882 +0000 UTC m=+1286.560722478"
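The startup-latency figures are simple subtraction: podStartSLOduration is watchObservedRunningTime minus podCreationTimestamp, so for nova-api-0 it is 05:47:08.377670882 - 05:47:06 = 2.377670882 s, and for nova-metadata-0 it is 05:47:07.374082369 - 05:47:05 = 2.374082369 s. firstStartedPulling and lastFinishedPulling are the Go zero time (0001-01-01), consistent with no image pull having been needed, which is also why podStartE2EDuration equals the SLO duration here.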
Nov 26 05:47:08 crc kubenswrapper[4871]: I1126 05:47:08.530522 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90" path="/var/lib/kubelet/pods/2ff1dd07-99f3-4937-b5e7-ce5fd6b72d90/volumes"
Nov 26 05:47:09 crc kubenswrapper[4871]: I1126 05:47:09.722974 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 26 05:47:10 crc kubenswrapper[4871]: I1126 05:47:10.999578 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 26 05:47:10 crc kubenswrapper[4871]: I1126 05:47:10.999647 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 26 05:47:14 crc kubenswrapper[4871]: I1126 05:47:14.723216 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 26 05:47:14 crc kubenswrapper[4871]: I1126 05:47:14.783759 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 26 05:47:15 crc kubenswrapper[4871]: I1126 05:47:15.467447 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 26 05:47:16 crc kubenswrapper[4871]: I1126 05:47:16.000124 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 26 05:47:16 crc kubenswrapper[4871]: I1126 05:47:16.000489 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 26 05:47:17 crc kubenswrapper[4871]: I1126 05:47:17.012734 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d8636384-aac2-4fd2-8f51-5cd6ca47c362" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.226:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 26 05:47:17 crc kubenswrapper[4871]: I1126 05:47:17.012787 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d8636384-aac2-4fd2-8f51-5cd6ca47c362" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.226:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 26 05:47:17 crc kubenswrapper[4871]: I1126 05:47:17.014158 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 26 05:47:17 crc kubenswrapper[4871]: I1126 05:47:17.014192 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 26 05:47:18 crc kubenswrapper[4871]: I1126 05:47:18.030673 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b0049ce2-17f9-4372-a66e-7c03a3763460" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.227:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 26 05:47:18 crc kubenswrapper[4871]: I1126 05:47:18.030707 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b0049ce2-17f9-4372-a66e-7c03a3763460" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.227:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 26 05:47:21 crc kubenswrapper[4871]: I1126 05:47:21.490338 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
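Both pods' startup probes fail the same way: an HTTPS GET against the pod IP does not return response headers within the probe timeout, surfacing as either context deadline exceeded or net/http: request canceled, each annotated with Client.Timeout exceeded while awaiting headers. A minimal sketch of the client-side shape of such a probe, assuming an HTTPS endpoint with an internal service cert; probeOnce, the timeout, and the TLS handling are illustrative, not kubelet's prober code:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// probeOnce issues a single GET with a hard client timeout. When the server
// accepts the connection but is slow to answer (an app still booting, as
// above), the error carries "Client.Timeout exceeded while awaiting headers".
func probeOnce(url string, timeout time.Duration) error {
	client := &http.Client{
		Timeout: timeout,
		Transport: &http.Transport{
			// illustrative: skip verification for a self-signed service cert
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. context deadline exceeded / request canceled
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy: HTTP %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// 10.217.0.227:8774 is nova-api's pod IP and port from the log above.
	fmt.Println(probeOnce("https://10.217.0.227:8774/", time.Second))
}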
status="ready" pod="openstack/ceilometer-0" Nov 26 05:47:23 crc kubenswrapper[4871]: I1126 05:47:23.614755 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:47:23 crc kubenswrapper[4871]: I1126 05:47:23.614827 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:47:26 crc kubenswrapper[4871]: I1126 05:47:26.004628 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 05:47:26 crc kubenswrapper[4871]: I1126 05:47:26.006166 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 05:47:26 crc kubenswrapper[4871]: I1126 05:47:26.011755 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 05:47:26 crc kubenswrapper[4871]: I1126 05:47:26.572979 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 05:47:27 crc kubenswrapper[4871]: I1126 05:47:27.023686 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 05:47:27 crc kubenswrapper[4871]: I1126 05:47:27.024333 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 05:47:27 crc kubenswrapper[4871]: I1126 05:47:27.029058 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 05:47:27 crc kubenswrapper[4871]: I1126 05:47:27.041673 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 05:47:27 crc kubenswrapper[4871]: I1126 05:47:27.577404 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 05:47:27 crc kubenswrapper[4871]: I1126 05:47:27.586678 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 05:47:35 crc kubenswrapper[4871]: I1126 05:47:35.691263 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 05:47:37 crc kubenswrapper[4871]: I1126 05:47:37.329101 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 05:47:39 crc kubenswrapper[4871]: I1126 05:47:39.022565 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="4ba97673-d74c-47df-acae-f2dcc1ed10df" containerName="rabbitmq" containerID="cri-o://2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f" gracePeriod=604797 Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.558144 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" containerName="rabbitmq" containerID="cri-o://513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54" gracePeriod=604797 Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.681775 4871 util.go:48] "No ready sandbox 
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.769614 4871 generic.go:334] "Generic (PLEG): container finished" podID="4ba97673-d74c-47df-acae-f2dcc1ed10df" containerID="2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f" exitCode=0
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.769665 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4ba97673-d74c-47df-acae-f2dcc1ed10df","Type":"ContainerDied","Data":"2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f"}
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.769695 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4ba97673-d74c-47df-acae-f2dcc1ed10df","Type":"ContainerDied","Data":"200f44a9a5f1171868c1d4dd4f350d989277beea7c8d27453120140cb3d6593c"}
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.769716 4871 scope.go:117] "RemoveContainer" containerID="2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f"
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.769901 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.802949 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4ba97673-d74c-47df-acae-f2dcc1ed10df-erlang-cookie-secret\") pod \"4ba97673-d74c-47df-acae-f2dcc1ed10df\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") "
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.803022 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-confd\") pod \"4ba97673-d74c-47df-acae-f2dcc1ed10df\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") "
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.803056 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-plugins\") pod \"4ba97673-d74c-47df-acae-f2dcc1ed10df\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") "
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.803077 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-config-data\") pod \"4ba97673-d74c-47df-acae-f2dcc1ed10df\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") "
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.803105 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4ba97673-d74c-47df-acae-f2dcc1ed10df-pod-info\") pod \"4ba97673-d74c-47df-acae-f2dcc1ed10df\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") "
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.803216 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-server-conf\") pod \"4ba97673-d74c-47df-acae-f2dcc1ed10df\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") "
Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.803261 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for
volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-plugins-conf\") pod \"4ba97673-d74c-47df-acae-f2dcc1ed10df\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.803282 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-tls\") pod \"4ba97673-d74c-47df-acae-f2dcc1ed10df\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.803323 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cx7rd\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-kube-api-access-cx7rd\") pod \"4ba97673-d74c-47df-acae-f2dcc1ed10df\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.803375 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-erlang-cookie\") pod \"4ba97673-d74c-47df-acae-f2dcc1ed10df\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.803401 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"4ba97673-d74c-47df-acae-f2dcc1ed10df\" (UID: \"4ba97673-d74c-47df-acae-f2dcc1ed10df\") " Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.809148 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "4ba97673-d74c-47df-acae-f2dcc1ed10df" (UID: "4ba97673-d74c-47df-acae-f2dcc1ed10df"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.810610 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "4ba97673-d74c-47df-acae-f2dcc1ed10df" (UID: "4ba97673-d74c-47df-acae-f2dcc1ed10df"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.811318 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "4ba97673-d74c-47df-acae-f2dcc1ed10df" (UID: "4ba97673-d74c-47df-acae-f2dcc1ed10df"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.811874 4871 scope.go:117] "RemoveContainer" containerID="d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.812880 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ba97673-d74c-47df-acae-f2dcc1ed10df-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "4ba97673-d74c-47df-acae-f2dcc1ed10df" (UID: "4ba97673-d74c-47df-acae-f2dcc1ed10df"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.815031 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "4ba97673-d74c-47df-acae-f2dcc1ed10df" (UID: "4ba97673-d74c-47df-acae-f2dcc1ed10df"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.818224 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/4ba97673-d74c-47df-acae-f2dcc1ed10df-pod-info" (OuterVolumeSpecName: "pod-info") pod "4ba97673-d74c-47df-acae-f2dcc1ed10df" (UID: "4ba97673-d74c-47df-acae-f2dcc1ed10df"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.819261 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-kube-api-access-cx7rd" (OuterVolumeSpecName: "kube-api-access-cx7rd") pod "4ba97673-d74c-47df-acae-f2dcc1ed10df" (UID: "4ba97673-d74c-47df-acae-f2dcc1ed10df"). InnerVolumeSpecName "kube-api-access-cx7rd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.823728 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "4ba97673-d74c-47df-acae-f2dcc1ed10df" (UID: "4ba97673-d74c-47df-acae-f2dcc1ed10df"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.838327 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-config-data" (OuterVolumeSpecName: "config-data") pod "4ba97673-d74c-47df-acae-f2dcc1ed10df" (UID: "4ba97673-d74c-47df-acae-f2dcc1ed10df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.871208 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-server-conf" (OuterVolumeSpecName: "server-conf") pod "4ba97673-d74c-47df-acae-f2dcc1ed10df" (UID: "4ba97673-d74c-47df-acae-f2dcc1ed10df"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.905748 4871 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.905788 4871 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.905803 4871 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.905826 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cx7rd\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-kube-api-access-cx7rd\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.905838 4871 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.905867 4871 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.905877 4871 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4ba97673-d74c-47df-acae-f2dcc1ed10df-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.905887 4871 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.905897 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4ba97673-d74c-47df-acae-f2dcc1ed10df-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.905908 4871 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4ba97673-d74c-47df-acae-f2dcc1ed10df-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.927395 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "4ba97673-d74c-47df-acae-f2dcc1ed10df" (UID: "4ba97673-d74c-47df-acae-f2dcc1ed10df"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.942287 4871 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.944904 4871 scope.go:117] "RemoveContainer" containerID="2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f" Nov 26 05:47:40 crc kubenswrapper[4871]: E1126 05:47:40.945344 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f\": container with ID starting with 2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f not found: ID does not exist" containerID="2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.945369 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f"} err="failed to get container status \"2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f\": rpc error: code = NotFound desc = could not find container \"2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f\": container with ID starting with 2ac192240d5ce8763cbcf70657fb48aeddf20da69468a9431bfba953d797d02f not found: ID does not exist" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.945388 4871 scope.go:117] "RemoveContainer" containerID="d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760" Nov 26 05:47:40 crc kubenswrapper[4871]: E1126 05:47:40.945749 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760\": container with ID starting with d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760 not found: ID does not exist" containerID="d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760" Nov 26 05:47:40 crc kubenswrapper[4871]: I1126 05:47:40.945791 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760"} err="failed to get container status \"d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760\": rpc error: code = NotFound desc = could not find container \"d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760\": container with ID starting with d56886216524c0c1586b2b6af70c6b9c3cb40243a032857c40c053aea9413760 not found: ID does not exist" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.007866 4871 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.007914 4871 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4ba97673-d74c-47df-acae-f2dcc1ed10df-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.103471 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.110937 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/rabbitmq-server-0"] Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.138736 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 05:47:41 crc kubenswrapper[4871]: E1126 05:47:41.139189 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ba97673-d74c-47df-acae-f2dcc1ed10df" containerName="setup-container" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.139208 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ba97673-d74c-47df-acae-f2dcc1ed10df" containerName="setup-container" Nov 26 05:47:41 crc kubenswrapper[4871]: E1126 05:47:41.139244 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ba97673-d74c-47df-acae-f2dcc1ed10df" containerName="rabbitmq" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.139253 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ba97673-d74c-47df-acae-f2dcc1ed10df" containerName="rabbitmq" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.139575 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ba97673-d74c-47df-acae-f2dcc1ed10df" containerName="rabbitmq" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.140845 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.142657 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.142925 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mqgn6" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.143634 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.143651 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.143877 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.144282 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.147367 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.159027 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.211624 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.211689 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkgm2\" (UniqueName: \"kubernetes.io/projected/f823aa11-fe59-4296-9a43-81bfc1275737-kube-api-access-wkgm2\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.211738 4871 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f823aa11-fe59-4296-9a43-81bfc1275737-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.211831 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f823aa11-fe59-4296-9a43-81bfc1275737-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.211869 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f823aa11-fe59-4296-9a43-81bfc1275737-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.211908 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.211939 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.211977 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f823aa11-fe59-4296-9a43-81bfc1275737-config-data\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.212001 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.212084 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f823aa11-fe59-4296-9a43-81bfc1275737-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.212128 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.319023 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/f823aa11-fe59-4296-9a43-81bfc1275737-config-data\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.319149 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.319256 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f823aa11-fe59-4296-9a43-81bfc1275737-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.319289 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.319330 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.319365 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkgm2\" (UniqueName: \"kubernetes.io/projected/f823aa11-fe59-4296-9a43-81bfc1275737-kube-api-access-wkgm2\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.319402 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f823aa11-fe59-4296-9a43-81bfc1275737-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.319463 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f823aa11-fe59-4296-9a43-81bfc1275737-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.319488 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.319509 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f823aa11-fe59-4296-9a43-81bfc1275737-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.319550 
4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0"
Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.320099 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f823aa11-fe59-4296-9a43-81bfc1275737-config-data\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0"
Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.320412 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0"
Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.320567 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0"
Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.320689 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f823aa11-fe59-4296-9a43-81bfc1275737-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0"
Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.321116 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0"
Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.322175 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f823aa11-fe59-4296-9a43-81bfc1275737-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0"
Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.323583 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0"
Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.329065 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f823aa11-fe59-4296-9a43-81bfc1275737-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0"
Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.329300 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f823aa11-fe59-4296-9a43-81bfc1275737-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0"
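MountVolume.MountDevice is the once-per-node phase for the persistent volume: the local disk is made available at a global mount point (here /mnt/openstack/pv02), and the per-pod SetUp phase then exposes it inside the pod's volume directory, for local volumes typically as a bind mount. A sketch of that per-pod step under those assumptions; the pod-side path is illustrative, and real kubelet drives this through the volume plugin and mount utilities rather than a raw syscall (Linux-only code):

package main

import (
	"fmt"
	"syscall"
)

// bindIntoPod exposes an already-mounted node-global volume path to a pod
// directory via a bind mount, roughly what SetUp amounts to for a local PV.
func bindIntoPod(globalPath, podPath string) error {
	if err := syscall.Mount(globalPath, podPath, "", syscall.MS_BIND, ""); err != nil {
		return fmt.Errorf("bind %s -> %s: %w", globalPath, podPath, err)
	}
	return nil
}

func main() {
	// device mount path from the log; the pod-side target is hypothetical
	err := bindIntoPod("/mnt/openstack/pv02",
		"/var/lib/kubelet/pods/f823aa11-fe59-4296-9a43-81bfc1275737/volumes/kubernetes.io~local-volume/local-storage02-crc")
	fmt.Println(err)
}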
pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.329421 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f823aa11-fe59-4296-9a43-81bfc1275737-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.339208 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkgm2\" (UniqueName: \"kubernetes.io/projected/f823aa11-fe59-4296-9a43-81bfc1275737-kube-api-access-wkgm2\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.370697 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"f823aa11-fe59-4296-9a43-81bfc1275737\") " pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.462303 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 05:47:41 crc kubenswrapper[4871]: I1126 05:47:41.995212 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.120649 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.238093 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3f9dfba-a3a9-45ef-a96c-91c654671b97-pod-info\") pod \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.238393 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfbqb\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-kube-api-access-kfbqb\") pod \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.238453 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-erlang-cookie\") pod \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.238476 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-plugins-conf\") pod \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.238498 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-confd\") pod \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.238520 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-config-data\") pod \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.238558 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-plugins\") pod \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.238592 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3f9dfba-a3a9-45ef-a96c-91c654671b97-erlang-cookie-secret\") pod \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.238632 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-tls\") pod \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.238677 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.238723 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-server-conf\") pod \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\" (UID: \"b3f9dfba-a3a9-45ef-a96c-91c654671b97\") " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.240919 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "b3f9dfba-a3a9-45ef-a96c-91c654671b97" (UID: "b3f9dfba-a3a9-45ef-a96c-91c654671b97"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.242144 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "b3f9dfba-a3a9-45ef-a96c-91c654671b97" (UID: "b3f9dfba-a3a9-45ef-a96c-91c654671b97"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.250310 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "b3f9dfba-a3a9-45ef-a96c-91c654671b97" (UID: "b3f9dfba-a3a9-45ef-a96c-91c654671b97"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.251728 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/b3f9dfba-a3a9-45ef-a96c-91c654671b97-pod-info" (OuterVolumeSpecName: "pod-info") pod "b3f9dfba-a3a9-45ef-a96c-91c654671b97" (UID: "b3f9dfba-a3a9-45ef-a96c-91c654671b97"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.261384 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3f9dfba-a3a9-45ef-a96c-91c654671b97-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "b3f9dfba-a3a9-45ef-a96c-91c654671b97" (UID: "b3f9dfba-a3a9-45ef-a96c-91c654671b97"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.261768 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "b3f9dfba-a3a9-45ef-a96c-91c654671b97" (UID: "b3f9dfba-a3a9-45ef-a96c-91c654671b97"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.262250 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-kube-api-access-kfbqb" (OuterVolumeSpecName: "kube-api-access-kfbqb") pod "b3f9dfba-a3a9-45ef-a96c-91c654671b97" (UID: "b3f9dfba-a3a9-45ef-a96c-91c654671b97"). InnerVolumeSpecName "kube-api-access-kfbqb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.262382 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "b3f9dfba-a3a9-45ef-a96c-91c654671b97" (UID: "b3f9dfba-a3a9-45ef-a96c-91c654671b97"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.308516 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-config-data" (OuterVolumeSpecName: "config-data") pod "b3f9dfba-a3a9-45ef-a96c-91c654671b97" (UID: "b3f9dfba-a3a9-45ef-a96c-91c654671b97"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.344925 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-server-conf" (OuterVolumeSpecName: "server-conf") pod "b3f9dfba-a3a9-45ef-a96c-91c654671b97" (UID: "b3f9dfba-a3a9-45ef-a96c-91c654671b97"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.345906 4871 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b3f9dfba-a3a9-45ef-a96c-91c654671b97-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.345939 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfbqb\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-kube-api-access-kfbqb\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.345951 4871 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.345960 4871 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.345969 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.345976 4871 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.345984 4871 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b3f9dfba-a3a9-45ef-a96c-91c654671b97-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.345994 4871 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.346013 4871 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.346022 4871 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b3f9dfba-a3a9-45ef-a96c-91c654671b97-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.366221 4871 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.418003 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "b3f9dfba-a3a9-45ef-a96c-91c654671b97" (UID: "b3f9dfba-a3a9-45ef-a96c-91c654671b97"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.448077 4871 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b3f9dfba-a3a9-45ef-a96c-91c654671b97-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.448108 4871 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.520361 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ba97673-d74c-47df-acae-f2dcc1ed10df" path="/var/lib/kubelet/pods/4ba97673-d74c-47df-acae-f2dcc1ed10df/volumes" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.793683 4871 generic.go:334] "Generic (PLEG): container finished" podID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" containerID="513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54" exitCode=0 Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.793757 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.793789 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b3f9dfba-a3a9-45ef-a96c-91c654671b97","Type":"ContainerDied","Data":"513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54"} Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.793847 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b3f9dfba-a3a9-45ef-a96c-91c654671b97","Type":"ContainerDied","Data":"519b591f0063f72f1e7c0b2b265e922312e11ab5d5648d0c0a804fc2157c27ae"} Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.793873 4871 scope.go:117] "RemoveContainer" containerID="513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.800963 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f823aa11-fe59-4296-9a43-81bfc1275737","Type":"ContainerStarted","Data":"ee43eff531dd3797ca18ea756234c16df48865ec6ba556688068848bd75fc9cc"} Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.823676 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.832686 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.836282 4871 scope.go:117] "RemoveContainer" containerID="7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.868345 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 05:47:42 crc kubenswrapper[4871]: E1126 05:47:42.869905 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" containerName="setup-container" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.869954 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" containerName="setup-container" Nov 26 05:47:42 crc kubenswrapper[4871]: E1126 05:47:42.869965 4871 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" containerName="rabbitmq" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.869973 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" containerName="rabbitmq" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.870284 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" containerName="rabbitmq" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.871623 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.878741 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.879024 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.879238 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.879419 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-sw6fb" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.879861 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.880153 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.880479 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.902049 4871 scope.go:117] "RemoveContainer" containerID="513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54" Nov 26 05:47:42 crc kubenswrapper[4871]: E1126 05:47:42.902696 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54\": container with ID starting with 513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54 not found: ID does not exist" containerID="513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.902728 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54"} err="failed to get container status \"513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54\": rpc error: code = NotFound desc = could not find container \"513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54\": container with ID starting with 513d02f3f345dbd6ce9c339ae25ddc4d8f3df0f1677b53fdda76444113568d54 not found: ID does not exist" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.902752 4871 scope.go:117] "RemoveContainer" containerID="7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927" Nov 26 05:47:42 crc kubenswrapper[4871]: E1126 05:47:42.903063 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927\": container with ID starting 
with 7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927 not found: ID does not exist" containerID="7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.903088 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927"} err="failed to get container status \"7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927\": rpc error: code = NotFound desc = could not find container \"7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927\": container with ID starting with 7a6e02e0a5f846c42f26119c46b3293cb428ccde51bfa28d8eb0ed9051685927 not found: ID does not exist" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.917881 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.958567 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c02a9e9c-8083-4903-a64d-a140b1c9c143-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.958630 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.958660 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c02a9e9c-8083-4903-a64d-a140b1c9c143-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.958698 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.958778 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.958812 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.958840 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/c02a9e9c-8083-4903-a64d-a140b1c9c143-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.958860 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.958885 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c02a9e9c-8083-4903-a64d-a140b1c9c143-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.958926 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c02a9e9c-8083-4903-a64d-a140b1c9c143-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:42 crc kubenswrapper[4871]: I1126 05:47:42.958951 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d26lw\" (UniqueName: \"kubernetes.io/projected/c02a9e9c-8083-4903-a64d-a140b1c9c143-kube-api-access-d26lw\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061146 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061229 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061277 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061307 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c02a9e9c-8083-4903-a64d-a140b1c9c143-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061344 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c02a9e9c-8083-4903-a64d-a140b1c9c143-config-data\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061409 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c02a9e9c-8083-4903-a64d-a140b1c9c143-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061453 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d26lw\" (UniqueName: \"kubernetes.io/projected/c02a9e9c-8083-4903-a64d-a140b1c9c143-kube-api-access-d26lw\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061513 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c02a9e9c-8083-4903-a64d-a140b1c9c143-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061562 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061697 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061767 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c02a9e9c-8083-4903-a64d-a140b1c9c143-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.061842 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.062304 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c02a9e9c-8083-4903-a64d-a140b1c9c143-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.062387 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc 
kubenswrapper[4871]: I1126 05:47:43.062438 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.063146 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c02a9e9c-8083-4903-a64d-a140b1c9c143-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.063781 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c02a9e9c-8083-4903-a64d-a140b1c9c143-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.067497 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.068188 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c02a9e9c-8083-4903-a64d-a140b1c9c143-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.068761 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c02a9e9c-8083-4903-a64d-a140b1c9c143-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.068958 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c02a9e9c-8083-4903-a64d-a140b1c9c143-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.079717 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d26lw\" (UniqueName: \"kubernetes.io/projected/c02a9e9c-8083-4903-a64d-a140b1c9c143-kube-api-access-d26lw\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.101964 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c02a9e9c-8083-4903-a64d-a140b1c9c143\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.242139 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.813691 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f823aa11-fe59-4296-9a43-81bfc1275737","Type":"ContainerStarted","Data":"3cf1135bb2eb2f2f185d3b14b1219e6aa61b5f6ff5cbd2f9d68d34bd8a1cc20f"} Nov 26 05:47:43 crc kubenswrapper[4871]: W1126 05:47:43.840780 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc02a9e9c_8083_4903_a64d_a140b1c9c143.slice/crio-20b319d390a829a79ea6ca1e870d6a93da355328beecf4874e558db41803f7e2 WatchSource:0}: Error finding container 20b319d390a829a79ea6ca1e870d6a93da355328beecf4874e558db41803f7e2: Status 404 returned error can't find the container with id 20b319d390a829a79ea6ca1e870d6a93da355328beecf4874e558db41803f7e2 Nov 26 05:47:43 crc kubenswrapper[4871]: I1126 05:47:43.845756 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 05:47:44 crc kubenswrapper[4871]: I1126 05:47:44.526852 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" path="/var/lib/kubelet/pods/b3f9dfba-a3a9-45ef-a96c-91c654671b97/volumes" Nov 26 05:47:44 crc kubenswrapper[4871]: I1126 05:47:44.831832 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c02a9e9c-8083-4903-a64d-a140b1c9c143","Type":"ContainerStarted","Data":"20b319d390a829a79ea6ca1e870d6a93da355328beecf4874e558db41803f7e2"} Nov 26 05:47:45 crc kubenswrapper[4871]: I1126 05:47:45.845597 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c02a9e9c-8083-4903-a64d-a140b1c9c143","Type":"ContainerStarted","Data":"4020b448e3b34785b012f6040bdd0d1d9cf83b40c73eec3be5a077fe2ad01c23"} Nov 26 05:47:46 crc kubenswrapper[4871]: E1126 05:47:46.326056 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice/crio-519b591f0063f72f1e7c0b2b265e922312e11ab5d5648d0c0a804fc2157c27ae\": RecentStats: unable to find data in memory cache]" Nov 26 05:47:47 crc kubenswrapper[4871]: I1126 05:47:47.081638 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="b3f9dfba-a3a9-45ef-a96c-91c654671b97" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.110:5671: i/o timeout" Nov 26 05:47:49 crc kubenswrapper[4871]: I1126 05:47:49.801782 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bf6c7df67-n7rdc"] Nov 26 05:47:49 crc kubenswrapper[4871]: I1126 05:47:49.804380 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:49 crc kubenswrapper[4871]: I1126 05:47:49.806874 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 26 05:47:49 crc kubenswrapper[4871]: I1126 05:47:49.825033 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bf6c7df67-n7rdc"] Nov 26 05:47:49 crc kubenswrapper[4871]: I1126 05:47:49.901182 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-nb\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:49 crc kubenswrapper[4871]: I1126 05:47:49.901264 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-swift-storage-0\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:49 crc kubenswrapper[4871]: I1126 05:47:49.901302 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx8mk\" (UniqueName: \"kubernetes.io/projected/16828cd5-780c-470d-9c81-fdea2edb1f0e-kube-api-access-mx8mk\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:49 crc kubenswrapper[4871]: I1126 05:47:49.901553 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-svc\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:49 crc kubenswrapper[4871]: I1126 05:47:49.901654 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-openstack-edpm-ipam\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:49 crc kubenswrapper[4871]: I1126 05:47:49.901794 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-sb\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:49 crc kubenswrapper[4871]: I1126 05:47:49.901816 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-config\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.003957 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-sb\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: 
\"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.004012 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-config\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.004045 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-nb\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.004082 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-swift-storage-0\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.004118 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx8mk\" (UniqueName: \"kubernetes.io/projected/16828cd5-780c-470d-9c81-fdea2edb1f0e-kube-api-access-mx8mk\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.004175 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-svc\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.004212 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-openstack-edpm-ipam\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.004930 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-config\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.005182 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-sb\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.005194 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-svc\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc 
kubenswrapper[4871]: I1126 05:47:50.005195 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-nb\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.005497 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-openstack-edpm-ipam\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.006009 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-swift-storage-0\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.026826 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx8mk\" (UniqueName: \"kubernetes.io/projected/16828cd5-780c-470d-9c81-fdea2edb1f0e-kube-api-access-mx8mk\") pod \"dnsmasq-dns-bf6c7df67-n7rdc\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.126083 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.561883 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bf6c7df67-n7rdc"] Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.895011 4871 generic.go:334] "Generic (PLEG): container finished" podID="16828cd5-780c-470d-9c81-fdea2edb1f0e" containerID="807e869c508e7cde283c03aaac60ba260128a21141f3ae62637e50b4d3eab3c1" exitCode=0 Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.895100 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" event={"ID":"16828cd5-780c-470d-9c81-fdea2edb1f0e","Type":"ContainerDied","Data":"807e869c508e7cde283c03aaac60ba260128a21141f3ae62637e50b4d3eab3c1"} Nov 26 05:47:50 crc kubenswrapper[4871]: I1126 05:47:50.895367 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" event={"ID":"16828cd5-780c-470d-9c81-fdea2edb1f0e","Type":"ContainerStarted","Data":"b773921d991064bffb60bcd0331978d32bc7f12425190c4969acd41ea3bb4302"} Nov 26 05:47:51 crc kubenswrapper[4871]: I1126 05:47:51.931388 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" event={"ID":"16828cd5-780c-470d-9c81-fdea2edb1f0e","Type":"ContainerStarted","Data":"f75af4093beff797fe163d2165429d718fbcccdd2a8113a7c5f3ef5a0e5e42b8"} Nov 26 05:47:51 crc kubenswrapper[4871]: I1126 05:47:51.975071 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" podStartSLOduration=2.975047211 podStartE2EDuration="2.975047211s" podCreationTimestamp="2025-11-26 05:47:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:47:51.965094919 +0000 UTC m=+1330.148146525" 
watchObservedRunningTime="2025-11-26 05:47:51.975047211 +0000 UTC m=+1330.158098807" Nov 26 05:47:52 crc kubenswrapper[4871]: I1126 05:47:52.943676 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:47:53 crc kubenswrapper[4871]: I1126 05:47:53.614469 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:47:53 crc kubenswrapper[4871]: I1126 05:47:53.614581 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:47:56 crc kubenswrapper[4871]: E1126 05:47:56.659883 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice/crio-519b591f0063f72f1e7c0b2b265e922312e11ab5d5648d0c0a804fc2157c27ae\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice\": RecentStats: unable to find data in memory cache]" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.127895 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.230939 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54599d8f7-7gq8f"] Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.231225 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f" podUID="6d11cc8a-2c3e-421f-a156-0a811156876e" containerName="dnsmasq-dns" containerID="cri-o://899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84" gracePeriod=10 Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.445595 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77b58f4b85-prlhs"] Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.448564 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.479322 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f" podUID="6d11cc8a-2c3e-421f-a156-0a811156876e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.221:5353: connect: connection refused" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.487960 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77b58f4b85-prlhs"] Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.542301 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-ovsdbserver-sb\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.542426 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9s2cf\" (UniqueName: \"kubernetes.io/projected/7ea434ed-7152-4539-9589-d743e9d5b6c5-kube-api-access-9s2cf\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.542505 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-config\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.542562 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-dns-swift-storage-0\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.542582 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-dns-svc\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.542605 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-openstack-edpm-ipam\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.542729 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-ovsdbserver-nb\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.644793 4871 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-ovsdbserver-nb\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.644883 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-ovsdbserver-sb\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.644961 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9s2cf\" (UniqueName: \"kubernetes.io/projected/7ea434ed-7152-4539-9589-d743e9d5b6c5-kube-api-access-9s2cf\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.645050 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-config\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.645075 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-dns-swift-storage-0\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.645091 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-dns-svc\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.645116 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-openstack-edpm-ipam\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.646021 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-openstack-edpm-ipam\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.646048 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-dns-svc\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.646115 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-dns-swift-storage-0\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.646393 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-ovsdbserver-nb\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.646647 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-ovsdbserver-sb\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.646733 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ea434ed-7152-4539-9589-d743e9d5b6c5-config\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.670371 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9s2cf\" (UniqueName: \"kubernetes.io/projected/7ea434ed-7152-4539-9589-d743e9d5b6c5-kube-api-access-9s2cf\") pod \"dnsmasq-dns-77b58f4b85-prlhs\" (UID: \"7ea434ed-7152-4539-9589-d743e9d5b6c5\") " pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.779795 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.797884 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.848468 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-sb\") pod \"6d11cc8a-2c3e-421f-a156-0a811156876e\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.848821 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-config\") pod \"6d11cc8a-2c3e-421f-a156-0a811156876e\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.848871 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-swift-storage-0\") pod \"6d11cc8a-2c3e-421f-a156-0a811156876e\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.848915 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqw68\" (UniqueName: \"kubernetes.io/projected/6d11cc8a-2c3e-421f-a156-0a811156876e-kube-api-access-gqw68\") pod \"6d11cc8a-2c3e-421f-a156-0a811156876e\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.848979 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-nb\") pod \"6d11cc8a-2c3e-421f-a156-0a811156876e\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.849113 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-svc\") pod \"6d11cc8a-2c3e-421f-a156-0a811156876e\" (UID: \"6d11cc8a-2c3e-421f-a156-0a811156876e\") " Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.864971 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d11cc8a-2c3e-421f-a156-0a811156876e-kube-api-access-gqw68" (OuterVolumeSpecName: "kube-api-access-gqw68") pod "6d11cc8a-2c3e-421f-a156-0a811156876e" (UID: "6d11cc8a-2c3e-421f-a156-0a811156876e"). InnerVolumeSpecName "kube-api-access-gqw68". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.928513 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6d11cc8a-2c3e-421f-a156-0a811156876e" (UID: "6d11cc8a-2c3e-421f-a156-0a811156876e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.928818 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6d11cc8a-2c3e-421f-a156-0a811156876e" (UID: "6d11cc8a-2c3e-421f-a156-0a811156876e"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.944518 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6d11cc8a-2c3e-421f-a156-0a811156876e" (UID: "6d11cc8a-2c3e-421f-a156-0a811156876e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.949126 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6d11cc8a-2c3e-421f-a156-0a811156876e" (UID: "6d11cc8a-2c3e-421f-a156-0a811156876e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.951639 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.951670 4871 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.951681 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqw68\" (UniqueName: \"kubernetes.io/projected/6d11cc8a-2c3e-421f-a156-0a811156876e-kube-api-access-gqw68\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.951691 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.951702 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:00 crc kubenswrapper[4871]: I1126 05:48:00.970857 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-config" (OuterVolumeSpecName: "config") pod "6d11cc8a-2c3e-421f-a156-0a811156876e" (UID: "6d11cc8a-2c3e-421f-a156-0a811156876e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.033312 4871 generic.go:334] "Generic (PLEG): container finished" podID="6d11cc8a-2c3e-421f-a156-0a811156876e" containerID="899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84" exitCode=0 Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.033361 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f" event={"ID":"6d11cc8a-2c3e-421f-a156-0a811156876e","Type":"ContainerDied","Data":"899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84"} Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.033407 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f" event={"ID":"6d11cc8a-2c3e-421f-a156-0a811156876e","Type":"ContainerDied","Data":"2e5534759199e0250d3d6edd4d8b4c5ec22bd95fea617cc6dbb36d99dd6f7289"} Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.033426 4871 scope.go:117] "RemoveContainer" containerID="899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84" Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.033439 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54599d8f7-7gq8f" Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.053519 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d11cc8a-2c3e-421f-a156-0a811156876e-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.069110 4871 scope.go:117] "RemoveContainer" containerID="c4835066071e17315a28af1900a77408b1670e16c55821678df6d4ceb007144b" Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.095172 4871 scope.go:117] "RemoveContainer" containerID="899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84" Nov 26 05:48:01 crc kubenswrapper[4871]: E1126 05:48:01.095727 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84\": container with ID starting with 899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84 not found: ID does not exist" containerID="899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84" Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.095751 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84"} err="failed to get container status \"899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84\": rpc error: code = NotFound desc = could not find container \"899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84\": container with ID starting with 899ee11002f9c3b4772ed8e8a230b29f0a832de03867a0927e557913eb973a84 not found: ID does not exist" Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.095791 4871 scope.go:117] "RemoveContainer" containerID="c4835066071e17315a28af1900a77408b1670e16c55821678df6d4ceb007144b" Nov 26 05:48:01 crc kubenswrapper[4871]: E1126 05:48:01.096100 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4835066071e17315a28af1900a77408b1670e16c55821678df6d4ceb007144b\": container with ID starting with c4835066071e17315a28af1900a77408b1670e16c55821678df6d4ceb007144b not found: ID does not exist" 
containerID="c4835066071e17315a28af1900a77408b1670e16c55821678df6d4ceb007144b" Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.096124 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4835066071e17315a28af1900a77408b1670e16c55821678df6d4ceb007144b"} err="failed to get container status \"c4835066071e17315a28af1900a77408b1670e16c55821678df6d4ceb007144b\": rpc error: code = NotFound desc = could not find container \"c4835066071e17315a28af1900a77408b1670e16c55821678df6d4ceb007144b\": container with ID starting with c4835066071e17315a28af1900a77408b1670e16c55821678df6d4ceb007144b not found: ID does not exist" Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.109223 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54599d8f7-7gq8f"] Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.121202 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54599d8f7-7gq8f"] Nov 26 05:48:01 crc kubenswrapper[4871]: I1126 05:48:01.334402 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77b58f4b85-prlhs"] Nov 26 05:48:02 crc kubenswrapper[4871]: I1126 05:48:02.043993 4871 generic.go:334] "Generic (PLEG): container finished" podID="7ea434ed-7152-4539-9589-d743e9d5b6c5" containerID="d22c8e98a012fd9b43ac27cb2aaae180aab929914007571c03efb4f3a56ead7a" exitCode=0 Nov 26 05:48:02 crc kubenswrapper[4871]: I1126 05:48:02.044043 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" event={"ID":"7ea434ed-7152-4539-9589-d743e9d5b6c5","Type":"ContainerDied","Data":"d22c8e98a012fd9b43ac27cb2aaae180aab929914007571c03efb4f3a56ead7a"} Nov 26 05:48:02 crc kubenswrapper[4871]: I1126 05:48:02.044367 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" event={"ID":"7ea434ed-7152-4539-9589-d743e9d5b6c5","Type":"ContainerStarted","Data":"340fbc931e1413dc99162cbb4cb3dcb7ef6c0a817e3c384251d2b7f7317d286d"} Nov 26 05:48:02 crc kubenswrapper[4871]: I1126 05:48:02.521091 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d11cc8a-2c3e-421f-a156-0a811156876e" path="/var/lib/kubelet/pods/6d11cc8a-2c3e-421f-a156-0a811156876e/volumes" Nov 26 05:48:03 crc kubenswrapper[4871]: I1126 05:48:03.066218 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" event={"ID":"7ea434ed-7152-4539-9589-d743e9d5b6c5","Type":"ContainerStarted","Data":"ff982b1e8f1b923200a8e2d4d0e2d25e420aae957163521e4ff3f409a3da7faa"} Nov 26 05:48:03 crc kubenswrapper[4871]: I1126 05:48:03.066567 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:03 crc kubenswrapper[4871]: I1126 05:48:03.108167 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" podStartSLOduration=3.108145528 podStartE2EDuration="3.108145528s" podCreationTimestamp="2025-11-26 05:48:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:48:03.096218017 +0000 UTC m=+1341.279269643" watchObservedRunningTime="2025-11-26 05:48:03.108145528 +0000 UTC m=+1341.291197124" Nov 26 05:48:06 crc kubenswrapper[4871]: E1126 05:48:06.965551 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice/crio-519b591f0063f72f1e7c0b2b265e922312e11ab5d5648d0c0a804fc2157c27ae\": RecentStats: unable to find data in memory cache]" Nov 26 05:48:10 crc kubenswrapper[4871]: I1126 05:48:10.781931 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77b58f4b85-prlhs" Nov 26 05:48:10 crc kubenswrapper[4871]: I1126 05:48:10.895695 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bf6c7df67-n7rdc"] Nov 26 05:48:10 crc kubenswrapper[4871]: I1126 05:48:10.896195 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" podUID="16828cd5-780c-470d-9c81-fdea2edb1f0e" containerName="dnsmasq-dns" containerID="cri-o://f75af4093beff797fe163d2165429d718fbcccdd2a8113a7c5f3ef5a0e5e42b8" gracePeriod=10 Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.206755 4871 generic.go:334] "Generic (PLEG): container finished" podID="16828cd5-780c-470d-9c81-fdea2edb1f0e" containerID="f75af4093beff797fe163d2165429d718fbcccdd2a8113a7c5f3ef5a0e5e42b8" exitCode=0 Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.206798 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" event={"ID":"16828cd5-780c-470d-9c81-fdea2edb1f0e","Type":"ContainerDied","Data":"f75af4093beff797fe163d2165429d718fbcccdd2a8113a7c5f3ef5a0e5e42b8"} Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.401106 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.586012 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-sb\") pod \"16828cd5-780c-470d-9c81-fdea2edb1f0e\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.586282 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-svc\") pod \"16828cd5-780c-470d-9c81-fdea2edb1f0e\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.586374 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-openstack-edpm-ipam\") pod \"16828cd5-780c-470d-9c81-fdea2edb1f0e\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.586486 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-nb\") pod \"16828cd5-780c-470d-9c81-fdea2edb1f0e\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.587230 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-config\") pod \"16828cd5-780c-470d-9c81-fdea2edb1f0e\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.587296 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mx8mk\" (UniqueName: \"kubernetes.io/projected/16828cd5-780c-470d-9c81-fdea2edb1f0e-kube-api-access-mx8mk\") pod \"16828cd5-780c-470d-9c81-fdea2edb1f0e\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.587457 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-swift-storage-0\") pod \"16828cd5-780c-470d-9c81-fdea2edb1f0e\" (UID: \"16828cd5-780c-470d-9c81-fdea2edb1f0e\") " Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.593362 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16828cd5-780c-470d-9c81-fdea2edb1f0e-kube-api-access-mx8mk" (OuterVolumeSpecName: "kube-api-access-mx8mk") pod "16828cd5-780c-470d-9c81-fdea2edb1f0e" (UID: "16828cd5-780c-470d-9c81-fdea2edb1f0e"). InnerVolumeSpecName "kube-api-access-mx8mk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.648314 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "16828cd5-780c-470d-9c81-fdea2edb1f0e" (UID: "16828cd5-780c-470d-9c81-fdea2edb1f0e"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.669379 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "16828cd5-780c-470d-9c81-fdea2edb1f0e" (UID: "16828cd5-780c-470d-9c81-fdea2edb1f0e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.669614 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "16828cd5-780c-470d-9c81-fdea2edb1f0e" (UID: "16828cd5-780c-470d-9c81-fdea2edb1f0e"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.684875 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-config" (OuterVolumeSpecName: "config") pod "16828cd5-780c-470d-9c81-fdea2edb1f0e" (UID: "16828cd5-780c-470d-9c81-fdea2edb1f0e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.687619 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "16828cd5-780c-470d-9c81-fdea2edb1f0e" (UID: "16828cd5-780c-470d-9c81-fdea2edb1f0e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.688862 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "16828cd5-780c-470d-9c81-fdea2edb1f0e" (UID: "16828cd5-780c-470d-9c81-fdea2edb1f0e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.689811 4871 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.689845 4871 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.689860 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.689872 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-config\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.689884 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mx8mk\" (UniqueName: \"kubernetes.io/projected/16828cd5-780c-470d-9c81-fdea2edb1f0e-kube-api-access-mx8mk\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.689896 4871 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:11 crc kubenswrapper[4871]: I1126 05:48:11.689907 4871 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/16828cd5-780c-470d-9c81-fdea2edb1f0e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:12 crc kubenswrapper[4871]: I1126 05:48:12.223081 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" event={"ID":"16828cd5-780c-470d-9c81-fdea2edb1f0e","Type":"ContainerDied","Data":"b773921d991064bffb60bcd0331978d32bc7f12425190c4969acd41ea3bb4302"} Nov 26 05:48:12 crc kubenswrapper[4871]: I1126 05:48:12.223448 4871 scope.go:117] "RemoveContainer" containerID="f75af4093beff797fe163d2165429d718fbcccdd2a8113a7c5f3ef5a0e5e42b8" Nov 26 05:48:12 crc kubenswrapper[4871]: I1126 05:48:12.223771 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bf6c7df67-n7rdc" Nov 26 05:48:12 crc kubenswrapper[4871]: I1126 05:48:12.254095 4871 scope.go:117] "RemoveContainer" containerID="807e869c508e7cde283c03aaac60ba260128a21141f3ae62637e50b4d3eab3c1" Nov 26 05:48:12 crc kubenswrapper[4871]: I1126 05:48:12.272027 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bf6c7df67-n7rdc"] Nov 26 05:48:12 crc kubenswrapper[4871]: I1126 05:48:12.283259 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bf6c7df67-n7rdc"] Nov 26 05:48:12 crc kubenswrapper[4871]: I1126 05:48:12.522586 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16828cd5-780c-470d-9c81-fdea2edb1f0e" path="/var/lib/kubelet/pods/16828cd5-780c-470d-9c81-fdea2edb1f0e/volumes" Nov 26 05:48:16 crc kubenswrapper[4871]: I1126 05:48:16.281467 4871 generic.go:334] "Generic (PLEG): container finished" podID="f823aa11-fe59-4296-9a43-81bfc1275737" containerID="3cf1135bb2eb2f2f185d3b14b1219e6aa61b5f6ff5cbd2f9d68d34bd8a1cc20f" exitCode=0 Nov 26 05:48:16 crc kubenswrapper[4871]: I1126 05:48:16.281823 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f823aa11-fe59-4296-9a43-81bfc1275737","Type":"ContainerDied","Data":"3cf1135bb2eb2f2f185d3b14b1219e6aa61b5f6ff5cbd2f9d68d34bd8a1cc20f"} Nov 26 05:48:17 crc kubenswrapper[4871]: E1126 05:48:17.226656 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice/crio-519b591f0063f72f1e7c0b2b265e922312e11ab5d5648d0c0a804fc2157c27ae\": RecentStats: unable to find data in memory cache]" Nov 26 05:48:17 crc kubenswrapper[4871]: I1126 05:48:17.297250 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f823aa11-fe59-4296-9a43-81bfc1275737","Type":"ContainerStarted","Data":"05f006a214bccf0b593149e291a51bae6bd2c87df1ce1c62f78f34b361511a0c"} Nov 26 05:48:17 crc kubenswrapper[4871]: I1126 05:48:17.297548 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 26 05:48:17 crc kubenswrapper[4871]: I1126 05:48:17.326580 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.326549655 podStartE2EDuration="36.326549655s" podCreationTimestamp="2025-11-26 05:47:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:48:17.325796196 +0000 UTC m=+1355.508847822" watchObservedRunningTime="2025-11-26 05:48:17.326549655 +0000 UTC m=+1355.509601261" Nov 26 05:48:19 crc kubenswrapper[4871]: I1126 05:48:19.323229 4871 generic.go:334] "Generic (PLEG): container finished" podID="c02a9e9c-8083-4903-a64d-a140b1c9c143" containerID="4020b448e3b34785b012f6040bdd0d1d9cf83b40c73eec3be5a077fe2ad01c23" exitCode=0 Nov 26 05:48:19 crc kubenswrapper[4871]: I1126 05:48:19.323355 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c02a9e9c-8083-4903-a64d-a140b1c9c143","Type":"ContainerDied","Data":"4020b448e3b34785b012f6040bdd0d1d9cf83b40c73eec3be5a077fe2ad01c23"} Nov 26 
05:48:20 crc kubenswrapper[4871]: I1126 05:48:20.338646 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c02a9e9c-8083-4903-a64d-a140b1c9c143","Type":"ContainerStarted","Data":"7f8d10a893b00d6159a7353ae1fcc84708a3444f71d8b3bc42151660a3340789"} Nov 26 05:48:20 crc kubenswrapper[4871]: I1126 05:48:20.339270 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:48:20 crc kubenswrapper[4871]: I1126 05:48:20.370791 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.370767807 podStartE2EDuration="38.370767807s" podCreationTimestamp="2025-11-26 05:47:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:48:20.364088458 +0000 UTC m=+1358.547140054" watchObservedRunningTime="2025-11-26 05:48:20.370767807 +0000 UTC m=+1358.553819393" Nov 26 05:48:23 crc kubenswrapper[4871]: I1126 05:48:23.615506 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:48:23 crc kubenswrapper[4871]: I1126 05:48:23.616127 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:48:23 crc kubenswrapper[4871]: I1126 05:48:23.616194 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:48:23 crc kubenswrapper[4871]: I1126 05:48:23.616882 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"37292f3b6ef7c2c0c15724c5c3a632dff71152a03a81708ad9d2ed933a0a1b15"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 05:48:23 crc kubenswrapper[4871]: I1126 05:48:23.616940 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://37292f3b6ef7c2c0c15724c5c3a632dff71152a03a81708ad9d2ed933a0a1b15" gracePeriod=600 Nov 26 05:48:24 crc kubenswrapper[4871]: I1126 05:48:24.412540 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="37292f3b6ef7c2c0c15724c5c3a632dff71152a03a81708ad9d2ed933a0a1b15" exitCode=0 Nov 26 05:48:24 crc kubenswrapper[4871]: I1126 05:48:24.412793 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"37292f3b6ef7c2c0c15724c5c3a632dff71152a03a81708ad9d2ed933a0a1b15"} Nov 26 05:48:24 crc kubenswrapper[4871]: I1126 05:48:24.412831 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff"} Nov 26 05:48:24 crc kubenswrapper[4871]: I1126 05:48:24.412849 4871 scope.go:117] "RemoveContainer" containerID="5865561ff4962bde5a4a448acaaef84f57651a9dc7c55ecf0253e295a67c98b1" Nov 26 05:48:27 crc kubenswrapper[4871]: E1126 05:48:27.500162 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice/crio-519b591f0063f72f1e7c0b2b265e922312e11ab5d5648d0c0a804fc2157c27ae\": RecentStats: unable to find data in memory cache]" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.984429 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7"] Nov 26 05:48:28 crc kubenswrapper[4871]: E1126 05:48:28.985173 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d11cc8a-2c3e-421f-a156-0a811156876e" containerName="init" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.985192 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d11cc8a-2c3e-421f-a156-0a811156876e" containerName="init" Nov 26 05:48:28 crc kubenswrapper[4871]: E1126 05:48:28.985208 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d11cc8a-2c3e-421f-a156-0a811156876e" containerName="dnsmasq-dns" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.985216 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d11cc8a-2c3e-421f-a156-0a811156876e" containerName="dnsmasq-dns" Nov 26 05:48:28 crc kubenswrapper[4871]: E1126 05:48:28.985231 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16828cd5-780c-470d-9c81-fdea2edb1f0e" containerName="dnsmasq-dns" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.985241 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="16828cd5-780c-470d-9c81-fdea2edb1f0e" containerName="dnsmasq-dns" Nov 26 05:48:28 crc kubenswrapper[4871]: E1126 05:48:28.985255 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16828cd5-780c-470d-9c81-fdea2edb1f0e" containerName="init" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.985262 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="16828cd5-780c-470d-9c81-fdea2edb1f0e" containerName="init" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.985494 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d11cc8a-2c3e-421f-a156-0a811156876e" containerName="dnsmasq-dns" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.985531 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="16828cd5-780c-470d-9c81-fdea2edb1f0e" containerName="dnsmasq-dns" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.986262 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.992081 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.992733 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.992508 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:48:28 crc kubenswrapper[4871]: I1126 05:48:28.994348 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.001770 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7"] Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.129690 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.129778 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.129864 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.129954 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mm7q8\" (UniqueName: \"kubernetes.io/projected/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-kube-api-access-mm7q8\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.231448 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.231525 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-repo-setup-combined-ca-bundle\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.231623 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mm7q8\" (UniqueName: \"kubernetes.io/projected/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-kube-api-access-mm7q8\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.231699 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.242250 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.242533 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.251891 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.258293 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mm7q8\" (UniqueName: \"kubernetes.io/projected/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-kube-api-access-mm7q8\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.320135 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:29 crc kubenswrapper[4871]: W1126 05:48:29.973301 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2024b11a_b0d5_4988_ba36_cdcb7eb4d5c2.slice/crio-e529c4f96db7cbce9c4b34fe76b379c0e9c0719c5f61ed52509ca06faeb7178a WatchSource:0}: Error finding container e529c4f96db7cbce9c4b34fe76b379c0e9c0719c5f61ed52509ca06faeb7178a: Status 404 returned error can't find the container with id e529c4f96db7cbce9c4b34fe76b379c0e9c0719c5f61ed52509ca06faeb7178a Nov 26 05:48:29 crc kubenswrapper[4871]: I1126 05:48:29.977915 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7"] Nov 26 05:48:30 crc kubenswrapper[4871]: I1126 05:48:30.501556 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" event={"ID":"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2","Type":"ContainerStarted","Data":"e529c4f96db7cbce9c4b34fe76b379c0e9c0719c5f61ed52509ca06faeb7178a"} Nov 26 05:48:31 crc kubenswrapper[4871]: I1126 05:48:31.465163 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 05:48:33 crc kubenswrapper[4871]: I1126 05:48:33.246748 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 05:48:37 crc kubenswrapper[4871]: E1126 05:48:37.765980 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3f9dfba_a3a9_45ef_a96c_91c654671b97.slice/crio-519b591f0063f72f1e7c0b2b265e922312e11ab5d5648d0c0a804fc2157c27ae\": RecentStats: unable to find data in memory cache]" Nov 26 05:48:39 crc kubenswrapper[4871]: I1126 05:48:39.635901 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" event={"ID":"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2","Type":"ContainerStarted","Data":"05c85c6e620b17decafba65f3f6da9869f3191fc5026cccc29a54f3fb4ea00f5"} Nov 26 05:48:39 crc kubenswrapper[4871]: I1126 05:48:39.668093 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" podStartSLOduration=2.857608448 podStartE2EDuration="11.668059168s" podCreationTimestamp="2025-11-26 05:48:28 +0000 UTC" firstStartedPulling="2025-11-26 05:48:29.975380792 +0000 UTC m=+1368.158432388" lastFinishedPulling="2025-11-26 05:48:38.785831522 +0000 UTC m=+1376.968883108" observedRunningTime="2025-11-26 05:48:39.659067125 +0000 UTC m=+1377.842118751" watchObservedRunningTime="2025-11-26 05:48:39.668059168 +0000 UTC m=+1377.851110794" Nov 26 05:48:42 crc kubenswrapper[4871]: E1126 05:48:42.539337 4871 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/5d21cf98b992347e97c2f8a09d62326081f5259d902a95eea1daa61e55082a93/diff" to get inode usage: stat /var/lib/containers/storage/overlay/5d21cf98b992347e97c2f8a09d62326081f5259d902a95eea1daa61e55082a93/diff: no such file or directory, extraDiskErr: Nov 26 05:48:50 crc kubenswrapper[4871]: I1126 
05:48:50.800581 4871 generic.go:334] "Generic (PLEG): container finished" podID="2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2" containerID="05c85c6e620b17decafba65f3f6da9869f3191fc5026cccc29a54f3fb4ea00f5" exitCode=0 Nov 26 05:48:50 crc kubenswrapper[4871]: I1126 05:48:50.800666 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" event={"ID":"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2","Type":"ContainerDied","Data":"05c85c6e620b17decafba65f3f6da9869f3191fc5026cccc29a54f3fb4ea00f5"} Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.352124 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.467007 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-repo-setup-combined-ca-bundle\") pod \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.467371 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-ssh-key\") pod \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.467505 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mm7q8\" (UniqueName: \"kubernetes.io/projected/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-kube-api-access-mm7q8\") pod \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.467578 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-inventory\") pod \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\" (UID: \"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2\") " Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.473691 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-kube-api-access-mm7q8" (OuterVolumeSpecName: "kube-api-access-mm7q8") pod "2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2" (UID: "2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2"). InnerVolumeSpecName "kube-api-access-mm7q8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.480824 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2" (UID: "2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.498981 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-inventory" (OuterVolumeSpecName: "inventory") pod "2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2" (UID: "2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.504299 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2" (UID: "2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.569893 4871 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.569986 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.570037 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mm7q8\" (UniqueName: \"kubernetes.io/projected/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-kube-api-access-mm7q8\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.570056 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.829788 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" event={"ID":"2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2","Type":"ContainerDied","Data":"e529c4f96db7cbce9c4b34fe76b379c0e9c0719c5f61ed52509ca06faeb7178a"} Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.829876 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e529c4f96db7cbce9c4b34fe76b379c0e9c0719c5f61ed52509ca06faeb7178a" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.829891 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.994320 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj"] Nov 26 05:48:52 crc kubenswrapper[4871]: E1126 05:48:52.994752 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.994772 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.994985 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.995653 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.998498 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.998635 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:48:52 crc kubenswrapper[4871]: I1126 05:48:52.998672 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.001323 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.018075 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj"] Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.182358 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkn55\" (UniqueName: \"kubernetes.io/projected/4027b3b8-7a16-419f-8b16-52ff000c7268-kube-api-access-mkn55\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-vgjkj\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.182664 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-vgjkj\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.182926 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-vgjkj\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.284152 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-vgjkj\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.284602 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-vgjkj\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.284780 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkn55\" (UniqueName: \"kubernetes.io/projected/4027b3b8-7a16-419f-8b16-52ff000c7268-kube-api-access-mkn55\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-vgjkj\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.289895 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-vgjkj\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.291205 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-vgjkj\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.308307 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkn55\" (UniqueName: \"kubernetes.io/projected/4027b3b8-7a16-419f-8b16-52ff000c7268-kube-api-access-mkn55\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-vgjkj\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.330024 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:53 crc kubenswrapper[4871]: I1126 05:48:53.904735 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj"] Nov 26 05:48:54 crc kubenswrapper[4871]: I1126 05:48:54.854388 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" event={"ID":"4027b3b8-7a16-419f-8b16-52ff000c7268","Type":"ContainerStarted","Data":"2aa7834f9563463f4bee601569d0a75a16f3e6eb31e2cc654295777fecea382b"} Nov 26 05:48:54 crc kubenswrapper[4871]: I1126 05:48:54.854833 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" event={"ID":"4027b3b8-7a16-419f-8b16-52ff000c7268","Type":"ContainerStarted","Data":"977396be470743a9b58d18971ac9f30ea53448b537c267e16b30c3e8ee74d5ee"} Nov 26 05:48:54 crc kubenswrapper[4871]: I1126 05:48:54.877603 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" podStartSLOduration=2.41593332 podStartE2EDuration="2.877569863s" podCreationTimestamp="2025-11-26 05:48:52 +0000 UTC" firstStartedPulling="2025-11-26 05:48:53.919985505 +0000 UTC m=+1392.103037091" lastFinishedPulling="2025-11-26 05:48:54.381622038 +0000 UTC m=+1392.564673634" observedRunningTime="2025-11-26 05:48:54.871222796 +0000 UTC m=+1393.054274422" watchObservedRunningTime="2025-11-26 05:48:54.877569863 +0000 UTC m=+1393.060621499" Nov 26 05:48:57 crc kubenswrapper[4871]: I1126 05:48:57.907840 4871 generic.go:334] "Generic (PLEG): container finished" podID="4027b3b8-7a16-419f-8b16-52ff000c7268" containerID="2aa7834f9563463f4bee601569d0a75a16f3e6eb31e2cc654295777fecea382b" exitCode=0 Nov 26 05:48:57 crc kubenswrapper[4871]: I1126 05:48:57.908018 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" 
event={"ID":"4027b3b8-7a16-419f-8b16-52ff000c7268","Type":"ContainerDied","Data":"2aa7834f9563463f4bee601569d0a75a16f3e6eb31e2cc654295777fecea382b"} Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.449367 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.640644 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-inventory\") pod \"4027b3b8-7a16-419f-8b16-52ff000c7268\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.640807 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-ssh-key\") pod \"4027b3b8-7a16-419f-8b16-52ff000c7268\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.640986 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkn55\" (UniqueName: \"kubernetes.io/projected/4027b3b8-7a16-419f-8b16-52ff000c7268-kube-api-access-mkn55\") pod \"4027b3b8-7a16-419f-8b16-52ff000c7268\" (UID: \"4027b3b8-7a16-419f-8b16-52ff000c7268\") " Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.652210 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4027b3b8-7a16-419f-8b16-52ff000c7268-kube-api-access-mkn55" (OuterVolumeSpecName: "kube-api-access-mkn55") pod "4027b3b8-7a16-419f-8b16-52ff000c7268" (UID: "4027b3b8-7a16-419f-8b16-52ff000c7268"). InnerVolumeSpecName "kube-api-access-mkn55". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.681238 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-inventory" (OuterVolumeSpecName: "inventory") pod "4027b3b8-7a16-419f-8b16-52ff000c7268" (UID: "4027b3b8-7a16-419f-8b16-52ff000c7268"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.702116 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4027b3b8-7a16-419f-8b16-52ff000c7268" (UID: "4027b3b8-7a16-419f-8b16-52ff000c7268"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.743860 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.744079 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4027b3b8-7a16-419f-8b16-52ff000c7268-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.744270 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkn55\" (UniqueName: \"kubernetes.io/projected/4027b3b8-7a16-419f-8b16-52ff000c7268-kube-api-access-mkn55\") on node \"crc\" DevicePath \"\"" Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.940061 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" event={"ID":"4027b3b8-7a16-419f-8b16-52ff000c7268","Type":"ContainerDied","Data":"977396be470743a9b58d18971ac9f30ea53448b537c267e16b30c3e8ee74d5ee"} Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.940121 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="977396be470743a9b58d18971ac9f30ea53448b537c267e16b30c3e8ee74d5ee" Nov 26 05:48:59 crc kubenswrapper[4871]: I1126 05:48:59.940147 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-vgjkj" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.076894 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq"] Nov 26 05:49:00 crc kubenswrapper[4871]: E1126 05:49:00.077563 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4027b3b8-7a16-419f-8b16-52ff000c7268" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.077592 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="4027b3b8-7a16-419f-8b16-52ff000c7268" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.077991 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="4027b3b8-7a16-419f-8b16-52ff000c7268" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.079081 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.088091 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.088155 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.088261 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.088753 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.099283 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq"] Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.256070 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.256294 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.256397 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5rg7\" (UniqueName: \"kubernetes.io/projected/a811292e-f231-48cd-98b5-4acd21f945ed-kube-api-access-k5rg7\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.256453 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.358777 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.358929 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: 
\"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.359083 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.359203 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5rg7\" (UniqueName: \"kubernetes.io/projected/a811292e-f231-48cd-98b5-4acd21f945ed-kube-api-access-k5rg7\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.369101 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.369638 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.371457 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.387221 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5rg7\" (UniqueName: \"kubernetes.io/projected/a811292e-f231-48cd-98b5-4acd21f945ed-kube-api-access-k5rg7\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.404512 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.847088 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq"] Nov 26 05:49:00 crc kubenswrapper[4871]: I1126 05:49:00.956358 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" event={"ID":"a811292e-f231-48cd-98b5-4acd21f945ed","Type":"ContainerStarted","Data":"2e99cd27abe47dfd72e85d17545ac48fef85bca76c7c8dcdae7ad6deb7ded26d"} Nov 26 05:49:01 crc kubenswrapper[4871]: I1126 05:49:01.973106 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" event={"ID":"a811292e-f231-48cd-98b5-4acd21f945ed","Type":"ContainerStarted","Data":"1cd1c751232512b20730529b078f5745d9dabc89b06c89017fa56b4cb0d08de5"} Nov 26 05:49:02 crc kubenswrapper[4871]: I1126 05:49:02.008245 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" podStartSLOduration=1.631763686 podStartE2EDuration="2.008216334s" podCreationTimestamp="2025-11-26 05:49:00 +0000 UTC" firstStartedPulling="2025-11-26 05:49:00.851310467 +0000 UTC m=+1399.034362063" lastFinishedPulling="2025-11-26 05:49:01.227763075 +0000 UTC m=+1399.410814711" observedRunningTime="2025-11-26 05:49:01.995892338 +0000 UTC m=+1400.178943934" watchObservedRunningTime="2025-11-26 05:49:02.008216334 +0000 UTC m=+1400.191267940" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.068378 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fx8x2"] Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.070954 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.077486 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fx8x2"] Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.177893 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-catalog-content\") pod \"redhat-operators-fx8x2\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.178310 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vs2pz\" (UniqueName: \"kubernetes.io/projected/8e09f3b3-2624-44fe-8108-bb040a8b1252-kube-api-access-vs2pz\") pod \"redhat-operators-fx8x2\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.178474 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-utilities\") pod \"redhat-operators-fx8x2\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.280863 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vs2pz\" (UniqueName: \"kubernetes.io/projected/8e09f3b3-2624-44fe-8108-bb040a8b1252-kube-api-access-vs2pz\") pod \"redhat-operators-fx8x2\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.281130 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-utilities\") pod \"redhat-operators-fx8x2\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.281302 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-catalog-content\") pod \"redhat-operators-fx8x2\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.281894 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-catalog-content\") pod \"redhat-operators-fx8x2\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.282217 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-utilities\") pod \"redhat-operators-fx8x2\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.304984 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-vs2pz\" (UniqueName: \"kubernetes.io/projected/8e09f3b3-2624-44fe-8108-bb040a8b1252-kube-api-access-vs2pz\") pod \"redhat-operators-fx8x2\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.434907 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:09 crc kubenswrapper[4871]: I1126 05:49:09.911362 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fx8x2"] Nov 26 05:49:10 crc kubenswrapper[4871]: I1126 05:49:10.066388 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx8x2" event={"ID":"8e09f3b3-2624-44fe-8108-bb040a8b1252","Type":"ContainerStarted","Data":"c6c375e9f9f37bf7a3cf446fbae3b1141641056b60004aba1f808810578440b1"} Nov 26 05:49:11 crc kubenswrapper[4871]: I1126 05:49:11.084599 4871 generic.go:334] "Generic (PLEG): container finished" podID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerID="cd3b2703144726943ccb2180d148f4805fa25c14495964d4d10a5873514b39c3" exitCode=0 Nov 26 05:49:11 crc kubenswrapper[4871]: I1126 05:49:11.084866 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx8x2" event={"ID":"8e09f3b3-2624-44fe-8108-bb040a8b1252","Type":"ContainerDied","Data":"cd3b2703144726943ccb2180d148f4805fa25c14495964d4d10a5873514b39c3"} Nov 26 05:49:13 crc kubenswrapper[4871]: I1126 05:49:13.113661 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx8x2" event={"ID":"8e09f3b3-2624-44fe-8108-bb040a8b1252","Type":"ContainerStarted","Data":"4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6"} Nov 26 05:49:16 crc kubenswrapper[4871]: I1126 05:49:16.150223 4871 generic.go:334] "Generic (PLEG): container finished" podID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerID="4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6" exitCode=0 Nov 26 05:49:16 crc kubenswrapper[4871]: I1126 05:49:16.150777 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx8x2" event={"ID":"8e09f3b3-2624-44fe-8108-bb040a8b1252","Type":"ContainerDied","Data":"4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6"} Nov 26 05:49:17 crc kubenswrapper[4871]: I1126 05:49:17.166741 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx8x2" event={"ID":"8e09f3b3-2624-44fe-8108-bb040a8b1252","Type":"ContainerStarted","Data":"b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5"} Nov 26 05:49:17 crc kubenswrapper[4871]: I1126 05:49:17.188242 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fx8x2" podStartSLOduration=2.73060152 podStartE2EDuration="8.188221737s" podCreationTimestamp="2025-11-26 05:49:09 +0000 UTC" firstStartedPulling="2025-11-26 05:49:11.086792873 +0000 UTC m=+1409.269844469" lastFinishedPulling="2025-11-26 05:49:16.5444131 +0000 UTC m=+1414.727464686" observedRunningTime="2025-11-26 05:49:17.18394321 +0000 UTC m=+1415.366994816" watchObservedRunningTime="2025-11-26 05:49:17.188221737 +0000 UTC m=+1415.371273333" Nov 26 05:49:19 crc kubenswrapper[4871]: I1126 05:49:18.868409 4871 scope.go:117] "RemoveContainer" containerID="7f8ee206d672f505d17cead024215e90be09e4e8f2e5ff42e1513630e20112bc" Nov 26 05:49:19 
Nov 26 05:49:19 crc kubenswrapper[4871]: I1126 05:49:19.437100 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fx8x2"
Nov 26 05:49:20 crc kubenswrapper[4871]: I1126 05:49:20.528801 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fx8x2" podUID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerName="registry-server" probeResult="failure" output=<
Nov 26 05:49:20 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s
Nov 26 05:49:20 crc kubenswrapper[4871]: >
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.432186 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rst5l"]
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.434468 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.448700 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rst5l"]
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.613797 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-utilities\") pod \"redhat-marketplace-rst5l\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") " pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.613910 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-catalog-content\") pod \"redhat-marketplace-rst5l\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") " pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.614023 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k674x\" (UniqueName: \"kubernetes.io/projected/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-kube-api-access-k674x\") pod \"redhat-marketplace-rst5l\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") " pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.715463 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k674x\" (UniqueName: \"kubernetes.io/projected/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-kube-api-access-k674x\") pod \"redhat-marketplace-rst5l\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") " pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.715604 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-utilities\") pod \"redhat-marketplace-rst5l\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") " pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.715696 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-catalog-content\") pod \"redhat-marketplace-rst5l\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") " pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.716154 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-utilities\") pod \"redhat-marketplace-rst5l\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") " pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.716180 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-catalog-content\") pod \"redhat-marketplace-rst5l\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") " pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.741883 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k674x\" (UniqueName: \"kubernetes.io/projected/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-kube-api-access-k674x\") pod \"redhat-marketplace-rst5l\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") " pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:22 crc kubenswrapper[4871]: I1126 05:49:22.759064 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:23 crc kubenswrapper[4871]: I1126 05:49:23.242795 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rst5l"]
Nov 26 05:49:24 crc kubenswrapper[4871]: I1126 05:49:24.262123 4871 generic.go:334] "Generic (PLEG): container finished" podID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" containerID="1cfaf877d8f98ea24b3d5dae457610f4e05629ef13ee95b6871cbbec75045c44" exitCode=0
Nov 26 05:49:24 crc kubenswrapper[4871]: I1126 05:49:24.262609 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rst5l" event={"ID":"06c3f62a-20ca-4faa-9b23-9f83db9be0c4","Type":"ContainerDied","Data":"1cfaf877d8f98ea24b3d5dae457610f4e05629ef13ee95b6871cbbec75045c44"}
Nov 26 05:49:24 crc kubenswrapper[4871]: I1126 05:49:24.262664 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rst5l" event={"ID":"06c3f62a-20ca-4faa-9b23-9f83db9be0c4","Type":"ContainerStarted","Data":"30f68c5d643e5e60591c45da3013639bd5504d60e64ead7fb9b2c179ba742c20"}
Nov 26 05:49:25 crc kubenswrapper[4871]: I1126 05:49:25.277871 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rst5l" event={"ID":"06c3f62a-20ca-4faa-9b23-9f83db9be0c4","Type":"ContainerStarted","Data":"f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647"}
Nov 26 05:49:26 crc kubenswrapper[4871]: I1126 05:49:26.288612 4871 generic.go:334] "Generic (PLEG): container finished" podID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" containerID="f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647" exitCode=0
Nov 26 05:49:26 crc kubenswrapper[4871]: I1126 05:49:26.288718 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rst5l" event={"ID":"06c3f62a-20ca-4faa-9b23-9f83db9be0c4","Type":"ContainerDied","Data":"f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647"}
Nov 26 05:49:27 crc kubenswrapper[4871]: I1126 05:49:27.309856 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rst5l" event={"ID":"06c3f62a-20ca-4faa-9b23-9f83db9be0c4","Type":"ContainerStarted","Data":"0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9"}
Nov 26 05:49:27 crc kubenswrapper[4871]: I1126 05:49:27.338400 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rst5l" podStartSLOduration=2.898081928 podStartE2EDuration="5.338379063s" podCreationTimestamp="2025-11-26 05:49:22 +0000 UTC" firstStartedPulling="2025-11-26 05:49:24.263658045 +0000 UTC m=+1422.446709631" lastFinishedPulling="2025-11-26 05:49:26.70395518 +0000 UTC m=+1424.887006766" observedRunningTime="2025-11-26 05:49:27.336120247 +0000 UTC m=+1425.519171833" watchObservedRunningTime="2025-11-26 05:49:27.338379063 +0000 UTC m=+1425.521430649"
Nov 26 05:49:30 crc kubenswrapper[4871]: I1126 05:49:30.489434 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fx8x2" podUID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerName="registry-server" probeResult="failure" output=<
Nov 26 05:49:30 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s
Nov 26 05:49:30 crc kubenswrapper[4871]: >
Nov 26 05:49:32 crc kubenswrapper[4871]: I1126 05:49:32.759653 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:32 crc kubenswrapper[4871]: I1126 05:49:32.759951 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:32 crc kubenswrapper[4871]: I1126 05:49:32.831660 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:33 crc kubenswrapper[4871]: I1126 05:49:33.429911 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:33 crc kubenswrapper[4871]: I1126 05:49:33.475583 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rst5l"]
Nov 26 05:49:35 crc kubenswrapper[4871]: I1126 05:49:35.411332 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rst5l" podUID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" containerName="registry-server" containerID="cri-o://0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9" gracePeriod=2
Nov 26 05:49:35 crc kubenswrapper[4871]: I1126 05:49:35.902660 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rst5l"
Nov 26 05:49:35 crc kubenswrapper[4871]: I1126 05:49:35.993586 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-catalog-content\") pod \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") "
Nov 26 05:49:35 crc kubenswrapper[4871]: I1126 05:49:35.993793 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-utilities\") pod \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") "
Nov 26 05:49:35 crc kubenswrapper[4871]: I1126 05:49:35.993847 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k674x\" (UniqueName: \"kubernetes.io/projected/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-kube-api-access-k674x\") pod \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\" (UID: \"06c3f62a-20ca-4faa-9b23-9f83db9be0c4\") "
Nov 26 05:49:35 crc kubenswrapper[4871]: I1126 05:49:35.995183 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-utilities" (OuterVolumeSpecName: "utilities") pod "06c3f62a-20ca-4faa-9b23-9f83db9be0c4" (UID: "06c3f62a-20ca-4faa-9b23-9f83db9be0c4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.001891 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-kube-api-access-k674x" (OuterVolumeSpecName: "kube-api-access-k674x") pod "06c3f62a-20ca-4faa-9b23-9f83db9be0c4" (UID: "06c3f62a-20ca-4faa-9b23-9f83db9be0c4"). InnerVolumeSpecName "kube-api-access-k674x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.020379 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "06c3f62a-20ca-4faa-9b23-9f83db9be0c4" (UID: "06c3f62a-20ca-4faa-9b23-9f83db9be0c4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.095484 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.095511 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.095520 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k674x\" (UniqueName: \"kubernetes.io/projected/06c3f62a-20ca-4faa-9b23-9f83db9be0c4-kube-api-access-k674x\") on node \"crc\" DevicePath \"\""
Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.430428 4871 generic.go:334] "Generic (PLEG): container finished" podID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" containerID="0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9" exitCode=0
Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.430508 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rst5l" event={"ID":"06c3f62a-20ca-4faa-9b23-9f83db9be0c4","Type":"ContainerDied","Data":"0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9"}
Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.430562 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rst5l" event={"ID":"06c3f62a-20ca-4faa-9b23-9f83db9be0c4","Type":"ContainerDied","Data":"30f68c5d643e5e60591c45da3013639bd5504d60e64ead7fb9b2c179ba742c20"}
Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.430586 4871 scope.go:117] "RemoveContainer" containerID="0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9"
Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.430516 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rst5l"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rst5l" Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.459672 4871 scope.go:117] "RemoveContainer" containerID="f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647" Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.491654 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rst5l"] Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.504202 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rst5l"] Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.507332 4871 scope.go:117] "RemoveContainer" containerID="1cfaf877d8f98ea24b3d5dae457610f4e05629ef13ee95b6871cbbec75045c44" Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.527558 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" path="/var/lib/kubelet/pods/06c3f62a-20ca-4faa-9b23-9f83db9be0c4/volumes" Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.574352 4871 scope.go:117] "RemoveContainer" containerID="0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9" Nov 26 05:49:36 crc kubenswrapper[4871]: E1126 05:49:36.575077 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9\": container with ID starting with 0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9 not found: ID does not exist" containerID="0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9" Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.575117 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9"} err="failed to get container status \"0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9\": rpc error: code = NotFound desc = could not find container \"0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9\": container with ID starting with 0eca58d8c0730563938d740148142dcaafe22f47f83df59ffaeb48e3e8ecbdd9 not found: ID does not exist" Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.575142 4871 scope.go:117] "RemoveContainer" containerID="f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647" Nov 26 05:49:36 crc kubenswrapper[4871]: E1126 05:49:36.575591 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647\": container with ID starting with f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647 not found: ID does not exist" containerID="f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647" Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.575615 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647"} err="failed to get container status \"f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647\": rpc error: code = NotFound desc = could not find container \"f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647\": container with ID starting with f3e82497494bc65b26e2c3be7334fb1f3a82b2998d9cde86f05f017a6354a647 not found: ID does not exist" Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 
05:49:36.575634 4871 scope.go:117] "RemoveContainer" containerID="1cfaf877d8f98ea24b3d5dae457610f4e05629ef13ee95b6871cbbec75045c44" Nov 26 05:49:36 crc kubenswrapper[4871]: E1126 05:49:36.576090 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1cfaf877d8f98ea24b3d5dae457610f4e05629ef13ee95b6871cbbec75045c44\": container with ID starting with 1cfaf877d8f98ea24b3d5dae457610f4e05629ef13ee95b6871cbbec75045c44 not found: ID does not exist" containerID="1cfaf877d8f98ea24b3d5dae457610f4e05629ef13ee95b6871cbbec75045c44" Nov 26 05:49:36 crc kubenswrapper[4871]: I1126 05:49:36.576155 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cfaf877d8f98ea24b3d5dae457610f4e05629ef13ee95b6871cbbec75045c44"} err="failed to get container status \"1cfaf877d8f98ea24b3d5dae457610f4e05629ef13ee95b6871cbbec75045c44\": rpc error: code = NotFound desc = could not find container \"1cfaf877d8f98ea24b3d5dae457610f4e05629ef13ee95b6871cbbec75045c44\": container with ID starting with 1cfaf877d8f98ea24b3d5dae457610f4e05629ef13ee95b6871cbbec75045c44 not found: ID does not exist" Nov 26 05:49:39 crc kubenswrapper[4871]: I1126 05:49:39.504431 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:39 crc kubenswrapper[4871]: I1126 05:49:39.557994 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:40 crc kubenswrapper[4871]: I1126 05:49:40.484591 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fx8x2"] Nov 26 05:49:41 crc kubenswrapper[4871]: I1126 05:49:41.493029 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fx8x2" podUID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerName="registry-server" containerID="cri-o://b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5" gracePeriod=2 Nov 26 05:49:41 crc kubenswrapper[4871]: I1126 05:49:41.993653 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.146574 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-utilities\") pod \"8e09f3b3-2624-44fe-8108-bb040a8b1252\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.146785 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-catalog-content\") pod \"8e09f3b3-2624-44fe-8108-bb040a8b1252\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.146919 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vs2pz\" (UniqueName: \"kubernetes.io/projected/8e09f3b3-2624-44fe-8108-bb040a8b1252-kube-api-access-vs2pz\") pod \"8e09f3b3-2624-44fe-8108-bb040a8b1252\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.147856 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-utilities" (OuterVolumeSpecName: "utilities") pod "8e09f3b3-2624-44fe-8108-bb040a8b1252" (UID: "8e09f3b3-2624-44fe-8108-bb040a8b1252"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.155996 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e09f3b3-2624-44fe-8108-bb040a8b1252-kube-api-access-vs2pz" (OuterVolumeSpecName: "kube-api-access-vs2pz") pod "8e09f3b3-2624-44fe-8108-bb040a8b1252" (UID: "8e09f3b3-2624-44fe-8108-bb040a8b1252"). InnerVolumeSpecName "kube-api-access-vs2pz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.249103 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e09f3b3-2624-44fe-8108-bb040a8b1252" (UID: "8e09f3b3-2624-44fe-8108-bb040a8b1252"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.249611 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-catalog-content\") pod \"8e09f3b3-2624-44fe-8108-bb040a8b1252\" (UID: \"8e09f3b3-2624-44fe-8108-bb040a8b1252\") " Nov 26 05:49:42 crc kubenswrapper[4871]: W1126 05:49:42.249810 4871 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/8e09f3b3-2624-44fe-8108-bb040a8b1252/volumes/kubernetes.io~empty-dir/catalog-content Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.249825 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e09f3b3-2624-44fe-8108-bb040a8b1252" (UID: "8e09f3b3-2624-44fe-8108-bb040a8b1252"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.250778 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.250817 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e09f3b3-2624-44fe-8108-bb040a8b1252-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.250839 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vs2pz\" (UniqueName: \"kubernetes.io/projected/8e09f3b3-2624-44fe-8108-bb040a8b1252-kube-api-access-vs2pz\") on node \"crc\" DevicePath \"\"" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.516474 4871 generic.go:334] "Generic (PLEG): container finished" podID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerID="b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5" exitCode=0 Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.516689 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fx8x2" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.534972 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx8x2" event={"ID":"8e09f3b3-2624-44fe-8108-bb040a8b1252","Type":"ContainerDied","Data":"b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5"} Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.535037 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fx8x2" event={"ID":"8e09f3b3-2624-44fe-8108-bb040a8b1252","Type":"ContainerDied","Data":"c6c375e9f9f37bf7a3cf446fbae3b1141641056b60004aba1f808810578440b1"} Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.535070 4871 scope.go:117] "RemoveContainer" containerID="b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.580193 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fx8x2"] Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.592058 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fx8x2"] Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.594204 4871 scope.go:117] "RemoveContainer" containerID="4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.627804 4871 scope.go:117] "RemoveContainer" containerID="cd3b2703144726943ccb2180d148f4805fa25c14495964d4d10a5873514b39c3" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.682441 4871 scope.go:117] "RemoveContainer" containerID="b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5" Nov 26 05:49:42 crc kubenswrapper[4871]: E1126 05:49:42.682982 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5\": container with ID starting with b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5 not found: ID does not exist" containerID="b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.683037 4871 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5"} err="failed to get container status \"b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5\": rpc error: code = NotFound desc = could not find container \"b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5\": container with ID starting with b36303ed6690ab1f1d9499722d1b4702af4fbeabef0ced022233873ea45eb6c5 not found: ID does not exist" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.683064 4871 scope.go:117] "RemoveContainer" containerID="4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6" Nov 26 05:49:42 crc kubenswrapper[4871]: E1126 05:49:42.683472 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6\": container with ID starting with 4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6 not found: ID does not exist" containerID="4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.683497 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6"} err="failed to get container status \"4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6\": rpc error: code = NotFound desc = could not find container \"4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6\": container with ID starting with 4ea9976c2d1a87fb3e42e45554185fc4bda8323e5e347b73fe49e16e3444cfd6 not found: ID does not exist" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.683516 4871 scope.go:117] "RemoveContainer" containerID="cd3b2703144726943ccb2180d148f4805fa25c14495964d4d10a5873514b39c3" Nov 26 05:49:42 crc kubenswrapper[4871]: E1126 05:49:42.683897 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd3b2703144726943ccb2180d148f4805fa25c14495964d4d10a5873514b39c3\": container with ID starting with cd3b2703144726943ccb2180d148f4805fa25c14495964d4d10a5873514b39c3 not found: ID does not exist" containerID="cd3b2703144726943ccb2180d148f4805fa25c14495964d4d10a5873514b39c3" Nov 26 05:49:42 crc kubenswrapper[4871]: I1126 05:49:42.683927 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd3b2703144726943ccb2180d148f4805fa25c14495964d4d10a5873514b39c3"} err="failed to get container status \"cd3b2703144726943ccb2180d148f4805fa25c14495964d4d10a5873514b39c3\": rpc error: code = NotFound desc = could not find container \"cd3b2703144726943ccb2180d148f4805fa25c14495964d4d10a5873514b39c3\": container with ID starting with cd3b2703144726943ccb2180d148f4805fa25c14495964d4d10a5873514b39c3 not found: ID does not exist" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.093137 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-29d7z"] Nov 26 05:49:43 crc kubenswrapper[4871]: E1126 05:49:43.093628 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerName="extract-content" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.093648 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e09f3b3-2624-44fe-8108-bb040a8b1252" 
containerName="extract-content" Nov 26 05:49:43 crc kubenswrapper[4871]: E1126 05:49:43.093677 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerName="registry-server" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.093686 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerName="registry-server" Nov 26 05:49:43 crc kubenswrapper[4871]: E1126 05:49:43.093701 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerName="extract-utilities" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.093709 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerName="extract-utilities" Nov 26 05:49:43 crc kubenswrapper[4871]: E1126 05:49:43.093736 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" containerName="extract-content" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.093745 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" containerName="extract-content" Nov 26 05:49:43 crc kubenswrapper[4871]: E1126 05:49:43.093762 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" containerName="registry-server" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.093769 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" containerName="registry-server" Nov 26 05:49:43 crc kubenswrapper[4871]: E1126 05:49:43.093784 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" containerName="extract-utilities" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.093791 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" containerName="extract-utilities" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.094016 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="06c3f62a-20ca-4faa-9b23-9f83db9be0c4" containerName="registry-server" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.094033 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e09f3b3-2624-44fe-8108-bb040a8b1252" containerName="registry-server" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.095759 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.136347 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-29d7z"] Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.171136 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-catalog-content\") pod \"community-operators-29d7z\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.171295 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-utilities\") pod \"community-operators-29d7z\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.171337 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwnsc\" (UniqueName: \"kubernetes.io/projected/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-kube-api-access-qwnsc\") pod \"community-operators-29d7z\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.273257 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-catalog-content\") pod \"community-operators-29d7z\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.273377 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-utilities\") pod \"community-operators-29d7z\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.273417 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwnsc\" (UniqueName: \"kubernetes.io/projected/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-kube-api-access-qwnsc\") pod \"community-operators-29d7z\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.273940 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-utilities\") pod \"community-operators-29d7z\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.273961 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-catalog-content\") pod \"community-operators-29d7z\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.306705 4871 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qwnsc\" (UniqueName: \"kubernetes.io/projected/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-kube-api-access-qwnsc\") pod \"community-operators-29d7z\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:43 crc kubenswrapper[4871]: I1126 05:49:43.436011 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:44 crc kubenswrapper[4871]: W1126 05:49:44.089651 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f6c71aa_6e12_4c50_97a7_c6aedc6ce17b.slice/crio-845e8a513a66ffd037ad8e0e9bc1bab6a729d32e3b11d2975a344c3d0460bc32 WatchSource:0}: Error finding container 845e8a513a66ffd037ad8e0e9bc1bab6a729d32e3b11d2975a344c3d0460bc32: Status 404 returned error can't find the container with id 845e8a513a66ffd037ad8e0e9bc1bab6a729d32e3b11d2975a344c3d0460bc32 Nov 26 05:49:44 crc kubenswrapper[4871]: I1126 05:49:44.090007 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-29d7z"] Nov 26 05:49:44 crc kubenswrapper[4871]: I1126 05:49:44.523339 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e09f3b3-2624-44fe-8108-bb040a8b1252" path="/var/lib/kubelet/pods/8e09f3b3-2624-44fe-8108-bb040a8b1252/volumes" Nov 26 05:49:44 crc kubenswrapper[4871]: I1126 05:49:44.550721 4871 generic.go:334] "Generic (PLEG): container finished" podID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" containerID="e139b0e120bcd945b4f0b0e19fe3dd8103d18bfd5f306e02cf3f7bf0da51c34e" exitCode=0 Nov 26 05:49:44 crc kubenswrapper[4871]: I1126 05:49:44.550778 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29d7z" event={"ID":"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b","Type":"ContainerDied","Data":"e139b0e120bcd945b4f0b0e19fe3dd8103d18bfd5f306e02cf3f7bf0da51c34e"} Nov 26 05:49:44 crc kubenswrapper[4871]: I1126 05:49:44.550814 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29d7z" event={"ID":"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b","Type":"ContainerStarted","Data":"845e8a513a66ffd037ad8e0e9bc1bab6a729d32e3b11d2975a344c3d0460bc32"} Nov 26 05:49:45 crc kubenswrapper[4871]: I1126 05:49:45.567600 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29d7z" event={"ID":"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b","Type":"ContainerStarted","Data":"208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9"} Nov 26 05:49:47 crc kubenswrapper[4871]: I1126 05:49:47.590211 4871 generic.go:334] "Generic (PLEG): container finished" podID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" containerID="208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9" exitCode=0 Nov 26 05:49:47 crc kubenswrapper[4871]: I1126 05:49:47.590289 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29d7z" event={"ID":"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b","Type":"ContainerDied","Data":"208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9"} Nov 26 05:49:48 crc kubenswrapper[4871]: I1126 05:49:48.604747 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29d7z" 
event={"ID":"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b","Type":"ContainerStarted","Data":"37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd"} Nov 26 05:49:48 crc kubenswrapper[4871]: I1126 05:49:48.641361 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-29d7z" podStartSLOduration=2.174203441 podStartE2EDuration="5.641336733s" podCreationTimestamp="2025-11-26 05:49:43 +0000 UTC" firstStartedPulling="2025-11-26 05:49:44.552978325 +0000 UTC m=+1442.736029931" lastFinishedPulling="2025-11-26 05:49:48.020111637 +0000 UTC m=+1446.203163223" observedRunningTime="2025-11-26 05:49:48.628072333 +0000 UTC m=+1446.811123939" watchObservedRunningTime="2025-11-26 05:49:48.641336733 +0000 UTC m=+1446.824388359" Nov 26 05:49:49 crc kubenswrapper[4871]: E1126 05:49:49.830503 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice/crio-c6c375e9f9f37bf7a3cf446fbae3b1141641056b60004aba1f808810578440b1\": RecentStats: unable to find data in memory cache]" Nov 26 05:49:53 crc kubenswrapper[4871]: I1126 05:49:53.436147 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:53 crc kubenswrapper[4871]: I1126 05:49:53.436564 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:53 crc kubenswrapper[4871]: I1126 05:49:53.491415 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:53 crc kubenswrapper[4871]: I1126 05:49:53.758055 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:53 crc kubenswrapper[4871]: I1126 05:49:53.811343 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-29d7z"] Nov 26 05:49:55 crc kubenswrapper[4871]: I1126 05:49:55.713119 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-29d7z" podUID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" containerName="registry-server" containerID="cri-o://37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd" gracePeriod=2 Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.282703 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.422012 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwnsc\" (UniqueName: \"kubernetes.io/projected/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-kube-api-access-qwnsc\") pod \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.422751 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-utilities\") pod \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.422906 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-catalog-content\") pod \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\" (UID: \"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b\") " Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.424448 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-utilities" (OuterVolumeSpecName: "utilities") pod "8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" (UID: "8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.431375 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-kube-api-access-qwnsc" (OuterVolumeSpecName: "kube-api-access-qwnsc") pod "8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" (UID: "8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b"). InnerVolumeSpecName "kube-api-access-qwnsc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.525924 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwnsc\" (UniqueName: \"kubernetes.io/projected/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-kube-api-access-qwnsc\") on node \"crc\" DevicePath \"\"" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.525961 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.617407 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" (UID: "8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.627901 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.735175 4871 generic.go:334] "Generic (PLEG): container finished" podID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" containerID="37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd" exitCode=0 Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.735229 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29d7z" event={"ID":"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b","Type":"ContainerDied","Data":"37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd"} Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.735264 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-29d7z" event={"ID":"8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b","Type":"ContainerDied","Data":"845e8a513a66ffd037ad8e0e9bc1bab6a729d32e3b11d2975a344c3d0460bc32"} Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.735282 4871 scope.go:117] "RemoveContainer" containerID="37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.735396 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-29d7z" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.769079 4871 scope.go:117] "RemoveContainer" containerID="208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.800013 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-29d7z"] Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.802761 4871 scope.go:117] "RemoveContainer" containerID="e139b0e120bcd945b4f0b0e19fe3dd8103d18bfd5f306e02cf3f7bf0da51c34e" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.812594 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-29d7z"] Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.851277 4871 scope.go:117] "RemoveContainer" containerID="37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd" Nov 26 05:49:56 crc kubenswrapper[4871]: E1126 05:49:56.851737 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd\": container with ID starting with 37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd not found: ID does not exist" containerID="37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.851779 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd"} err="failed to get container status \"37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd\": rpc error: code = NotFound desc = could not find container \"37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd\": container with ID starting with 37f7350aca57cfc5b63727e514f61099dbbf0714e0fd70fa8ab57adb9885d6bd not found: ID does not exist" Nov 26 
05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.851804 4871 scope.go:117] "RemoveContainer" containerID="208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9" Nov 26 05:49:56 crc kubenswrapper[4871]: E1126 05:49:56.852067 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9\": container with ID starting with 208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9 not found: ID does not exist" containerID="208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.852115 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9"} err="failed to get container status \"208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9\": rpc error: code = NotFound desc = could not find container \"208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9\": container with ID starting with 208987ab1d0543712d6d9348d5153d613c392587711d324818e99108533c27b9 not found: ID does not exist" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.852147 4871 scope.go:117] "RemoveContainer" containerID="e139b0e120bcd945b4f0b0e19fe3dd8103d18bfd5f306e02cf3f7bf0da51c34e" Nov 26 05:49:56 crc kubenswrapper[4871]: E1126 05:49:56.852604 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e139b0e120bcd945b4f0b0e19fe3dd8103d18bfd5f306e02cf3f7bf0da51c34e\": container with ID starting with e139b0e120bcd945b4f0b0e19fe3dd8103d18bfd5f306e02cf3f7bf0da51c34e not found: ID does not exist" containerID="e139b0e120bcd945b4f0b0e19fe3dd8103d18bfd5f306e02cf3f7bf0da51c34e" Nov 26 05:49:56 crc kubenswrapper[4871]: I1126 05:49:56.852642 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e139b0e120bcd945b4f0b0e19fe3dd8103d18bfd5f306e02cf3f7bf0da51c34e"} err="failed to get container status \"e139b0e120bcd945b4f0b0e19fe3dd8103d18bfd5f306e02cf3f7bf0da51c34e\": rpc error: code = NotFound desc = could not find container \"e139b0e120bcd945b4f0b0e19fe3dd8103d18bfd5f306e02cf3f7bf0da51c34e\": container with ID starting with e139b0e120bcd945b4f0b0e19fe3dd8103d18bfd5f306e02cf3f7bf0da51c34e not found: ID does not exist" Nov 26 05:49:58 crc kubenswrapper[4871]: I1126 05:49:58.532042 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" path="/var/lib/kubelet/pods/8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b/volumes" Nov 26 05:50:00 crc kubenswrapper[4871]: E1126 05:50:00.123370 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice/crio-c6c375e9f9f37bf7a3cf446fbae3b1141641056b60004aba1f808810578440b1\": RecentStats: unable to find data in memory cache]" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.367888 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wq5qf"] Nov 26 05:50:08 crc kubenswrapper[4871]: E1126 05:50:08.369014 4871 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" containerName="extract-utilities" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.369034 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" containerName="extract-utilities" Nov 26 05:50:08 crc kubenswrapper[4871]: E1126 05:50:08.369054 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" containerName="registry-server" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.369063 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" containerName="registry-server" Nov 26 05:50:08 crc kubenswrapper[4871]: E1126 05:50:08.369078 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" containerName="extract-content" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.369088 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" containerName="extract-content" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.369344 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f6c71aa-6e12-4c50-97a7-c6aedc6ce17b" containerName="registry-server" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.371840 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.387943 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wq5qf"] Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.485827 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-catalog-content\") pod \"certified-operators-wq5qf\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.485909 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkjx9\" (UniqueName: \"kubernetes.io/projected/4e014705-5812-4158-af04-274819aba269-kube-api-access-mkjx9\") pod \"certified-operators-wq5qf\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.485974 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-utilities\") pod \"certified-operators-wq5qf\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.587801 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-utilities\") pod \"certified-operators-wq5qf\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.588040 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-catalog-content\") pod \"certified-operators-wq5qf\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.588097 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkjx9\" (UniqueName: \"kubernetes.io/projected/4e014705-5812-4158-af04-274819aba269-kube-api-access-mkjx9\") pod \"certified-operators-wq5qf\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.588344 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-utilities\") pod \"certified-operators-wq5qf\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.588399 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-catalog-content\") pod \"certified-operators-wq5qf\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.610240 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkjx9\" (UniqueName: \"kubernetes.io/projected/4e014705-5812-4158-af04-274819aba269-kube-api-access-mkjx9\") pod \"certified-operators-wq5qf\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:08 crc kubenswrapper[4871]: I1126 05:50:08.703224 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:09 crc kubenswrapper[4871]: I1126 05:50:09.223442 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wq5qf"] Nov 26 05:50:09 crc kubenswrapper[4871]: I1126 05:50:09.903756 4871 generic.go:334] "Generic (PLEG): container finished" podID="4e014705-5812-4158-af04-274819aba269" containerID="dc5a68078079bb5a5efc30ad797f7be6526107a86aa59ef41611166d17d22f6c" exitCode=0 Nov 26 05:50:09 crc kubenswrapper[4871]: I1126 05:50:09.903847 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wq5qf" event={"ID":"4e014705-5812-4158-af04-274819aba269","Type":"ContainerDied","Data":"dc5a68078079bb5a5efc30ad797f7be6526107a86aa59ef41611166d17d22f6c"} Nov 26 05:50:09 crc kubenswrapper[4871]: I1126 05:50:09.904435 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wq5qf" event={"ID":"4e014705-5812-4158-af04-274819aba269","Type":"ContainerStarted","Data":"5b45a75167df4db570c2127775c7f5b4e2f54a107f091f4ca5f264e94e96f454"} Nov 26 05:50:10 crc kubenswrapper[4871]: E1126 05:50:10.444516 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice/crio-c6c375e9f9f37bf7a3cf446fbae3b1141641056b60004aba1f808810578440b1\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice\": RecentStats: unable to find data in memory cache]" Nov 26 05:50:11 crc kubenswrapper[4871]: I1126 05:50:11.929841 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wq5qf" event={"ID":"4e014705-5812-4158-af04-274819aba269","Type":"ContainerStarted","Data":"b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0"} Nov 26 05:50:12 crc kubenswrapper[4871]: I1126 05:50:12.942202 4871 generic.go:334] "Generic (PLEG): container finished" podID="4e014705-5812-4158-af04-274819aba269" containerID="b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0" exitCode=0 Nov 26 05:50:12 crc kubenswrapper[4871]: I1126 05:50:12.942511 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wq5qf" event={"ID":"4e014705-5812-4158-af04-274819aba269","Type":"ContainerDied","Data":"b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0"} Nov 26 05:50:13 crc kubenswrapper[4871]: I1126 05:50:13.960042 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wq5qf" event={"ID":"4e014705-5812-4158-af04-274819aba269","Type":"ContainerStarted","Data":"4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555"} Nov 26 05:50:18 crc kubenswrapper[4871]: I1126 05:50:18.703798 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:18 crc kubenswrapper[4871]: I1126 05:50:18.704361 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:18 crc kubenswrapper[4871]: I1126 05:50:18.758417 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:18 crc 
kubenswrapper[4871]: I1126 05:50:18.792763 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wq5qf" podStartSLOduration=7.349737964 podStartE2EDuration="10.792733736s" podCreationTimestamp="2025-11-26 05:50:08 +0000 UTC" firstStartedPulling="2025-11-26 05:50:09.905999002 +0000 UTC m=+1468.089050588" lastFinishedPulling="2025-11-26 05:50:13.348994774 +0000 UTC m=+1471.532046360" observedRunningTime="2025-11-26 05:50:13.988195856 +0000 UTC m=+1472.171247462" watchObservedRunningTime="2025-11-26 05:50:18.792733736 +0000 UTC m=+1476.975785362" Nov 26 05:50:19 crc kubenswrapper[4871]: I1126 05:50:19.092503 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:19 crc kubenswrapper[4871]: I1126 05:50:19.152436 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wq5qf"] Nov 26 05:50:19 crc kubenswrapper[4871]: I1126 05:50:19.280592 4871 scope.go:117] "RemoveContainer" containerID="4ed36338b1de980506adbc0c7934b42033af86f0054606599c6677f07e96669b" Nov 26 05:50:19 crc kubenswrapper[4871]: I1126 05:50:19.318783 4871 scope.go:117] "RemoveContainer" containerID="ae8ac99537258d7df8927e67f7ad624c60d74e466377d617ef3ff08389e1e3fd" Nov 26 05:50:19 crc kubenswrapper[4871]: I1126 05:50:19.550117 4871 scope.go:117] "RemoveContainer" containerID="80f653f1f49ea6a8bc0a49af029f7235a6e20f2c0333fc2f21be1d9d654ec329" Nov 26 05:50:19 crc kubenswrapper[4871]: I1126 05:50:19.609095 4871 scope.go:117] "RemoveContainer" containerID="4b3f7ef4dd996794249355cfc7b0eae92d7ebc4d4d9080ced156cb574b241e25" Nov 26 05:50:19 crc kubenswrapper[4871]: I1126 05:50:19.637112 4871 scope.go:117] "RemoveContainer" containerID="f5481d814a3efcc61a3330ac7e4e3e5f3899e1e7c4082c57528613cb9400a3cd" Nov 26 05:50:20 crc kubenswrapper[4871]: E1126 05:50:20.756258 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice/crio-c6c375e9f9f37bf7a3cf446fbae3b1141641056b60004aba1f808810578440b1\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice\": RecentStats: unable to find data in memory cache]" Nov 26 05:50:21 crc kubenswrapper[4871]: I1126 05:50:21.066628 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wq5qf" podUID="4e014705-5812-4158-af04-274819aba269" containerName="registry-server" containerID="cri-o://4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555" gracePeriod=2 Nov 26 05:50:21 crc kubenswrapper[4871]: I1126 05:50:21.636406 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:21 crc kubenswrapper[4871]: I1126 05:50:21.759603 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkjx9\" (UniqueName: \"kubernetes.io/projected/4e014705-5812-4158-af04-274819aba269-kube-api-access-mkjx9\") pod \"4e014705-5812-4158-af04-274819aba269\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " Nov 26 05:50:21 crc kubenswrapper[4871]: I1126 05:50:21.759668 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-utilities\") pod \"4e014705-5812-4158-af04-274819aba269\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " Nov 26 05:50:21 crc kubenswrapper[4871]: I1126 05:50:21.759820 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-catalog-content\") pod \"4e014705-5812-4158-af04-274819aba269\" (UID: \"4e014705-5812-4158-af04-274819aba269\") " Nov 26 05:50:21 crc kubenswrapper[4871]: I1126 05:50:21.760463 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-utilities" (OuterVolumeSpecName: "utilities") pod "4e014705-5812-4158-af04-274819aba269" (UID: "4e014705-5812-4158-af04-274819aba269"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:50:21 crc kubenswrapper[4871]: I1126 05:50:21.760626 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 05:50:21 crc kubenswrapper[4871]: I1126 05:50:21.771828 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e014705-5812-4158-af04-274819aba269-kube-api-access-mkjx9" (OuterVolumeSpecName: "kube-api-access-mkjx9") pod "4e014705-5812-4158-af04-274819aba269" (UID: "4e014705-5812-4158-af04-274819aba269"). InnerVolumeSpecName "kube-api-access-mkjx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:50:21 crc kubenswrapper[4871]: I1126 05:50:21.861951 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkjx9\" (UniqueName: \"kubernetes.io/projected/4e014705-5812-4158-af04-274819aba269-kube-api-access-mkjx9\") on node \"crc\" DevicePath \"\"" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.003886 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e014705-5812-4158-af04-274819aba269" (UID: "4e014705-5812-4158-af04-274819aba269"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.066498 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e014705-5812-4158-af04-274819aba269-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.083017 4871 generic.go:334] "Generic (PLEG): container finished" podID="4e014705-5812-4158-af04-274819aba269" containerID="4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555" exitCode=0 Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.083083 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wq5qf" event={"ID":"4e014705-5812-4158-af04-274819aba269","Type":"ContainerDied","Data":"4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555"} Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.083127 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wq5qf" event={"ID":"4e014705-5812-4158-af04-274819aba269","Type":"ContainerDied","Data":"5b45a75167df4db570c2127775c7f5b4e2f54a107f091f4ca5f264e94e96f454"} Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.083155 4871 scope.go:117] "RemoveContainer" containerID="4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.083249 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wq5qf" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.111329 4871 scope.go:117] "RemoveContainer" containerID="b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.148667 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wq5qf"] Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.150278 4871 scope.go:117] "RemoveContainer" containerID="dc5a68078079bb5a5efc30ad797f7be6526107a86aa59ef41611166d17d22f6c" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.162457 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wq5qf"] Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.196254 4871 scope.go:117] "RemoveContainer" containerID="4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555" Nov 26 05:50:22 crc kubenswrapper[4871]: E1126 05:50:22.197568 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555\": container with ID starting with 4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555 not found: ID does not exist" containerID="4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.197635 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555"} err="failed to get container status \"4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555\": rpc error: code = NotFound desc = could not find container \"4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555\": container with ID starting with 4a293b937c8d2ff586b67f4b9abd447fa94dea2b6fe3b01aab11ff60af4cd555 not found: ID does not exist" Nov 26 
05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.197684 4871 scope.go:117] "RemoveContainer" containerID="b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0" Nov 26 05:50:22 crc kubenswrapper[4871]: E1126 05:50:22.198149 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0\": container with ID starting with b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0 not found: ID does not exist" containerID="b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.198284 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0"} err="failed to get container status \"b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0\": rpc error: code = NotFound desc = could not find container \"b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0\": container with ID starting with b8792cf880bc9e42566c17f2d8cd3c6263f47185b12574bdab5ac0902a5c31f0 not found: ID does not exist" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.198444 4871 scope.go:117] "RemoveContainer" containerID="dc5a68078079bb5a5efc30ad797f7be6526107a86aa59ef41611166d17d22f6c" Nov 26 05:50:22 crc kubenswrapper[4871]: E1126 05:50:22.198993 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc5a68078079bb5a5efc30ad797f7be6526107a86aa59ef41611166d17d22f6c\": container with ID starting with dc5a68078079bb5a5efc30ad797f7be6526107a86aa59ef41611166d17d22f6c not found: ID does not exist" containerID="dc5a68078079bb5a5efc30ad797f7be6526107a86aa59ef41611166d17d22f6c" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.199043 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc5a68078079bb5a5efc30ad797f7be6526107a86aa59ef41611166d17d22f6c"} err="failed to get container status \"dc5a68078079bb5a5efc30ad797f7be6526107a86aa59ef41611166d17d22f6c\": rpc error: code = NotFound desc = could not find container \"dc5a68078079bb5a5efc30ad797f7be6526107a86aa59ef41611166d17d22f6c\": container with ID starting with dc5a68078079bb5a5efc30ad797f7be6526107a86aa59ef41611166d17d22f6c not found: ID does not exist" Nov 26 05:50:22 crc kubenswrapper[4871]: I1126 05:50:22.549122 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e014705-5812-4158-af04-274819aba269" path="/var/lib/kubelet/pods/4e014705-5812-4158-af04-274819aba269/volumes" Nov 26 05:50:23 crc kubenswrapper[4871]: I1126 05:50:23.615385 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:50:23 crc kubenswrapper[4871]: I1126 05:50:23.615457 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:50:31 crc kubenswrapper[4871]: E1126 05:50:31.070972 4871 
cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice/crio-c6c375e9f9f37bf7a3cf446fbae3b1141641056b60004aba1f808810578440b1\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice\": RecentStats: unable to find data in memory cache]" Nov 26 05:50:41 crc kubenswrapper[4871]: E1126 05:50:41.311030 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice/crio-c6c375e9f9f37bf7a3cf446fbae3b1141641056b60004aba1f808810578440b1\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e09f3b3_2624_44fe_8108_bb040a8b1252.slice\": RecentStats: unable to find data in memory cache]" Nov 26 05:50:53 crc kubenswrapper[4871]: I1126 05:50:53.614506 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:50:53 crc kubenswrapper[4871]: I1126 05:50:53.615081 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:51:19 crc kubenswrapper[4871]: I1126 05:51:19.776404 4871 scope.go:117] "RemoveContainer" containerID="0ecb79534560d8f2f10ebe835141ff761444eaff9dc3f528737ed699b8494ed2" Nov 26 05:51:19 crc kubenswrapper[4871]: I1126 05:51:19.816121 4871 scope.go:117] "RemoveContainer" containerID="bfcdcb7d37158281d937ab83f18bc6ea078af9a240958799b86ca5ada3dce012" Nov 26 05:51:19 crc kubenswrapper[4871]: I1126 05:51:19.856765 4871 scope.go:117] "RemoveContainer" containerID="a37f7c1dd217788f012692caa6dcf8d8413cd3b61d4cf3cf6c0084a745500919" Nov 26 05:51:19 crc kubenswrapper[4871]: I1126 05:51:19.956154 4871 scope.go:117] "RemoveContainer" containerID="7d77946b042e750ca6623b9dcba07756afad16d8a765a2519256ae2c396e6fa6" Nov 26 05:51:19 crc kubenswrapper[4871]: I1126 05:51:19.978983 4871 scope.go:117] "RemoveContainer" containerID="cda44fc22a31a318115b323e855b216c5bd22e374baaab577bd79a79c068721b" Nov 26 05:51:20 crc kubenswrapper[4871]: I1126 05:51:20.000129 4871 scope.go:117] "RemoveContainer" containerID="95a1257b7b87c0c6f5e3a60407c7428ca2153e4f024c94894b20f9aead01443b" Nov 26 05:51:20 crc kubenswrapper[4871]: I1126 05:51:20.029105 4871 scope.go:117] "RemoveContainer" containerID="b8e969a5d5e59b0221edc667450c8de831c8bdee920d320a70eaedfccc530bcc" Nov 26 05:51:20 crc kubenswrapper[4871]: I1126 05:51:20.050673 4871 scope.go:117] "RemoveContainer" containerID="a1bee00c5798a50238bb7af9d9709554171477c19e620ebf30e1eab38960dc87" Nov 26 05:51:23 crc kubenswrapper[4871]: I1126 05:51:23.615251 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:51:23 crc kubenswrapper[4871]: I1126 05:51:23.615815 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:51:23 crc kubenswrapper[4871]: I1126 05:51:23.615863 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:51:23 crc kubenswrapper[4871]: I1126 05:51:23.616698 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 05:51:23 crc kubenswrapper[4871]: I1126 05:51:23.616754 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" gracePeriod=600 Nov 26 05:51:23 crc kubenswrapper[4871]: E1126 05:51:23.734550 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:51:23 crc kubenswrapper[4871]: I1126 05:51:23.931042 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" exitCode=0 Nov 26 05:51:23 crc kubenswrapper[4871]: I1126 05:51:23.931117 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff"} Nov 26 05:51:23 crc kubenswrapper[4871]: I1126 05:51:23.931406 4871 scope.go:117] "RemoveContainer" containerID="37292f3b6ef7c2c0c15724c5c3a632dff71152a03a81708ad9d2ed933a0a1b15" Nov 26 05:51:23 crc kubenswrapper[4871]: I1126 05:51:23.932131 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:51:23 crc kubenswrapper[4871]: E1126 05:51:23.932421 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:51:36 crc kubenswrapper[4871]: I1126 05:51:36.508775 4871 scope.go:117] "RemoveContainer" 
containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:51:36 crc kubenswrapper[4871]: E1126 05:51:36.510585 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:51:48 crc kubenswrapper[4871]: I1126 05:51:48.507477 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:51:48 crc kubenswrapper[4871]: E1126 05:51:48.508390 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:52:00 crc kubenswrapper[4871]: I1126 05:52:00.507920 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:52:00 crc kubenswrapper[4871]: E1126 05:52:00.508881 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:52:11 crc kubenswrapper[4871]: I1126 05:52:11.507677 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:52:11 crc kubenswrapper[4871]: E1126 05:52:11.508251 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:52:15 crc kubenswrapper[4871]: I1126 05:52:15.500686 4871 generic.go:334] "Generic (PLEG): container finished" podID="a811292e-f231-48cd-98b5-4acd21f945ed" containerID="1cd1c751232512b20730529b078f5745d9dabc89b06c89017fa56b4cb0d08de5" exitCode=0 Nov 26 05:52:15 crc kubenswrapper[4871]: I1126 05:52:15.500801 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" event={"ID":"a811292e-f231-48cd-98b5-4acd21f945ed","Type":"ContainerDied","Data":"1cd1c751232512b20730529b078f5745d9dabc89b06c89017fa56b4cb0d08de5"} Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.022754 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.161995 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-inventory\") pod \"a811292e-f231-48cd-98b5-4acd21f945ed\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.162039 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5rg7\" (UniqueName: \"kubernetes.io/projected/a811292e-f231-48cd-98b5-4acd21f945ed-kube-api-access-k5rg7\") pod \"a811292e-f231-48cd-98b5-4acd21f945ed\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.162244 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-ssh-key\") pod \"a811292e-f231-48cd-98b5-4acd21f945ed\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.162277 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-bootstrap-combined-ca-bundle\") pod \"a811292e-f231-48cd-98b5-4acd21f945ed\" (UID: \"a811292e-f231-48cd-98b5-4acd21f945ed\") " Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.168924 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a811292e-f231-48cd-98b5-4acd21f945ed-kube-api-access-k5rg7" (OuterVolumeSpecName: "kube-api-access-k5rg7") pod "a811292e-f231-48cd-98b5-4acd21f945ed" (UID: "a811292e-f231-48cd-98b5-4acd21f945ed"). InnerVolumeSpecName "kube-api-access-k5rg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.171778 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "a811292e-f231-48cd-98b5-4acd21f945ed" (UID: "a811292e-f231-48cd-98b5-4acd21f945ed"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.211136 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a811292e-f231-48cd-98b5-4acd21f945ed" (UID: "a811292e-f231-48cd-98b5-4acd21f945ed"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.212042 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-inventory" (OuterVolumeSpecName: "inventory") pod "a811292e-f231-48cd-98b5-4acd21f945ed" (UID: "a811292e-f231-48cd-98b5-4acd21f945ed"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.349345 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.349640 4871 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.349657 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a811292e-f231-48cd-98b5-4acd21f945ed-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.349679 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5rg7\" (UniqueName: \"kubernetes.io/projected/a811292e-f231-48cd-98b5-4acd21f945ed-kube-api-access-k5rg7\") on node \"crc\" DevicePath \"\"" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.526635 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" event={"ID":"a811292e-f231-48cd-98b5-4acd21f945ed","Type":"ContainerDied","Data":"2e99cd27abe47dfd72e85d17545ac48fef85bca76c7c8dcdae7ad6deb7ded26d"} Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.526681 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e99cd27abe47dfd72e85d17545ac48fef85bca76c7c8dcdae7ad6deb7ded26d" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.526740 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.610320 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt"] Nov 26 05:52:17 crc kubenswrapper[4871]: E1126 05:52:17.610963 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e014705-5812-4158-af04-274819aba269" containerName="extract-content" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.611042 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e014705-5812-4158-af04-274819aba269" containerName="extract-content" Nov 26 05:52:17 crc kubenswrapper[4871]: E1126 05:52:17.611130 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e014705-5812-4158-af04-274819aba269" containerName="extract-utilities" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.611193 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e014705-5812-4158-af04-274819aba269" containerName="extract-utilities" Nov 26 05:52:17 crc kubenswrapper[4871]: E1126 05:52:17.611257 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e014705-5812-4158-af04-274819aba269" containerName="registry-server" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.611311 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e014705-5812-4158-af04-274819aba269" containerName="registry-server" Nov 26 05:52:17 crc kubenswrapper[4871]: E1126 05:52:17.611388 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a811292e-f231-48cd-98b5-4acd21f945ed" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.611440 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a811292e-f231-48cd-98b5-4acd21f945ed" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.611681 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e014705-5812-4158-af04-274819aba269" containerName="registry-server" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.611761 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a811292e-f231-48cd-98b5-4acd21f945ed" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.612483 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.615682 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.615796 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.615840 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.616853 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.625145 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt"] Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.758723 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.758852 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.758888 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4zm9\" (UniqueName: \"kubernetes.io/projected/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-kube-api-access-s4zm9\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.861767 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.861855 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4zm9\" (UniqueName: \"kubernetes.io/projected/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-kube-api-access-s4zm9\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.862124 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-ssh-key\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.866451 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.868611 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.895581 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4zm9\" (UniqueName: \"kubernetes.io/projected/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-kube-api-access-s4zm9\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" Nov 26 05:52:17 crc kubenswrapper[4871]: I1126 05:52:17.929984 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" Nov 26 05:52:18 crc kubenswrapper[4871]: W1126 05:52:18.493377 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0d17dc4_5d95_48fe_bf52_6241f6bfd6fa.slice/crio-4ae7e4881d2a161f01c90b78c961001779906343560baee357348d60e36dbf0b WatchSource:0}: Error finding container 4ae7e4881d2a161f01c90b78c961001779906343560baee357348d60e36dbf0b: Status 404 returned error can't find the container with id 4ae7e4881d2a161f01c90b78c961001779906343560baee357348d60e36dbf0b Nov 26 05:52:18 crc kubenswrapper[4871]: I1126 05:52:18.493552 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt"] Nov 26 05:52:18 crc kubenswrapper[4871]: I1126 05:52:18.496308 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 05:52:18 crc kubenswrapper[4871]: I1126 05:52:18.539028 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" event={"ID":"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa","Type":"ContainerStarted","Data":"4ae7e4881d2a161f01c90b78c961001779906343560baee357348d60e36dbf0b"} Nov 26 05:52:19 crc kubenswrapper[4871]: I1126 05:52:19.566495 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" event={"ID":"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa","Type":"ContainerStarted","Data":"d5a407a77ad3856b114df74d546b4fe2462c120753c82e462496259e4b6725e7"} Nov 26 05:52:19 crc kubenswrapper[4871]: I1126 05:52:19.604719 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" podStartSLOduration=2.111446751 podStartE2EDuration="2.604522594s" 
podCreationTimestamp="2025-11-26 05:52:17 +0000 UTC" firstStartedPulling="2025-11-26 05:52:18.495883031 +0000 UTC m=+1596.678934657" lastFinishedPulling="2025-11-26 05:52:18.988958894 +0000 UTC m=+1597.172010500" observedRunningTime="2025-11-26 05:52:19.586707592 +0000 UTC m=+1597.769759188" watchObservedRunningTime="2025-11-26 05:52:19.604522594 +0000 UTC m=+1597.787574230" Nov 26 05:52:20 crc kubenswrapper[4871]: I1126 05:52:20.185044 4871 scope.go:117] "RemoveContainer" containerID="09724c258dbd4fdee6bf3c02e2df545d1dab0b3e6e40cccfef054365c4cf858f" Nov 26 05:52:20 crc kubenswrapper[4871]: I1126 05:52:20.235216 4871 scope.go:117] "RemoveContainer" containerID="9fc8d2dd9a0b2c8181dbb18519b90acfa2304aa655cb3b8d656960f494713cb1" Nov 26 05:52:20 crc kubenswrapper[4871]: I1126 05:52:20.264989 4871 scope.go:117] "RemoveContainer" containerID="cad0798f2898de3a0b31bc0d4cd4c5c6132d0208856588e89b33940a53278e1d" Nov 26 05:52:20 crc kubenswrapper[4871]: I1126 05:52:20.298386 4871 scope.go:117] "RemoveContainer" containerID="5f0b2b9d5c4271014fe70b57c1328de4e93437bd862e6aef85401f4a8fbb9102" Nov 26 05:52:20 crc kubenswrapper[4871]: I1126 05:52:20.328981 4871 scope.go:117] "RemoveContainer" containerID="f905198e62a59f32cce4e027ce3975243ab73e386c1a91bc0e65169f4d4e6f43" Nov 26 05:52:25 crc kubenswrapper[4871]: I1126 05:52:25.513183 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:52:25 crc kubenswrapper[4871]: E1126 05:52:25.514264 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:52:38 crc kubenswrapper[4871]: I1126 05:52:38.508233 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:52:38 crc kubenswrapper[4871]: E1126 05:52:38.509516 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.068197 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5f39-account-create-update-wn6v7"] Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.083716 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-r7k7z"] Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.098119 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-btz92"] Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.112564 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-3b84-account-create-update-wz5gq"] Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.125425 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-create-pnbw4"] Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.134814 4871 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/placement-5f39-account-create-update-wn6v7"] Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.142577 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-4e26-account-create-update-6rmj5"] Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.150660 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-create-pnbw4"] Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.158680 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-r7k7z"] Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.166746 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-3b84-account-create-update-wz5gq"] Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.175711 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-btz92"] Nov 26 05:52:43 crc kubenswrapper[4871]: I1126 05:52:43.184115 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-4e26-account-create-update-6rmj5"] Nov 26 05:52:44 crc kubenswrapper[4871]: I1126 05:52:44.527626 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36652a1c-6392-4693-86fd-2ec4c2955cd6" path="/var/lib/kubelet/pods/36652a1c-6392-4693-86fd-2ec4c2955cd6/volumes" Nov 26 05:52:44 crc kubenswrapper[4871]: I1126 05:52:44.529003 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54b1fab8-ed9d-41f9-bd32-504ea14de7f7" path="/var/lib/kubelet/pods/54b1fab8-ed9d-41f9-bd32-504ea14de7f7/volumes" Nov 26 05:52:44 crc kubenswrapper[4871]: I1126 05:52:44.530561 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f197c15-87fa-40c6-9cbf-200c9746aba7" path="/var/lib/kubelet/pods/9f197c15-87fa-40c6-9cbf-200c9746aba7/volumes" Nov 26 05:52:44 crc kubenswrapper[4871]: I1126 05:52:44.531459 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a56ec863-f1f4-48f3-b167-54ec413401f1" path="/var/lib/kubelet/pods/a56ec863-f1f4-48f3-b167-54ec413401f1/volumes" Nov 26 05:52:44 crc kubenswrapper[4871]: I1126 05:52:44.534651 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef4d4490-6b6b-406b-b626-f135975b6e4a" path="/var/lib/kubelet/pods/ef4d4490-6b6b-406b-b626-f135975b6e4a/volumes" Nov 26 05:52:44 crc kubenswrapper[4871]: I1126 05:52:44.536315 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8e1692f-fcdb-4735-b4b4-904fb9c9da85" path="/var/lib/kubelet/pods/f8e1692f-fcdb-4735-b4b4-904fb9c9da85/volumes" Nov 26 05:52:52 crc kubenswrapper[4871]: I1126 05:52:52.525618 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:52:52 crc kubenswrapper[4871]: E1126 05:52:52.526882 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:53:03 crc kubenswrapper[4871]: I1126 05:53:03.507165 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:53:03 crc kubenswrapper[4871]: E1126 05:53:03.508266 4871 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:53:17 crc kubenswrapper[4871]: I1126 05:53:17.508734 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:53:17 crc kubenswrapper[4871]: E1126 05:53:17.510079 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:53:20 crc kubenswrapper[4871]: I1126 05:53:20.425138 4871 scope.go:117] "RemoveContainer" containerID="5ac1d3eed6816da396a8d7235c2650aa398b83b8ab2e071bba92aa0736c529cd" Nov 26 05:53:20 crc kubenswrapper[4871]: I1126 05:53:20.456089 4871 scope.go:117] "RemoveContainer" containerID="f875ba7d40b8e668c71b071714d435ca9aef02c364572f25828337bd34d78636" Nov 26 05:53:20 crc kubenswrapper[4871]: I1126 05:53:20.474359 4871 scope.go:117] "RemoveContainer" containerID="cb9c5e4989892cabc82eb43569b3b3448075f546e1d52c806e3d67e33c5bde9d" Nov 26 05:53:20 crc kubenswrapper[4871]: I1126 05:53:20.496783 4871 scope.go:117] "RemoveContainer" containerID="618317baff62f8b0e781d4e784538e3afff587433bb9d703ad6e56c94bd46778" Nov 26 05:53:20 crc kubenswrapper[4871]: I1126 05:53:20.544295 4871 scope.go:117] "RemoveContainer" containerID="5fed9628adca9f4839b5ff67ff420d33c13b1d950ef598988443b8372f13d1ba" Nov 26 05:53:20 crc kubenswrapper[4871]: I1126 05:53:20.593262 4871 scope.go:117] "RemoveContainer" containerID="083f93c8a643487d9983932214b1df84f495dca5380e7d14a28a636a6d29a2e1" Nov 26 05:53:20 crc kubenswrapper[4871]: I1126 05:53:20.624088 4871 scope.go:117] "RemoveContainer" containerID="df80e4c8d168c0118fa7f1f905cb66cd1e44ca021cf15aedb5e51a40ea0d0d62" Nov 26 05:53:20 crc kubenswrapper[4871]: I1126 05:53:20.666959 4871 scope.go:117] "RemoveContainer" containerID="d3691846afd5a7c4106cdf341d2421dfacf83c13fa69bcb02c8768296823c6db" Nov 26 05:53:26 crc kubenswrapper[4871]: I1126 05:53:26.049213 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-a517-account-create-update-zhft2"] Nov 26 05:53:26 crc kubenswrapper[4871]: I1126 05:53:26.061302 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-kj54h"] Nov 26 05:53:26 crc kubenswrapper[4871]: I1126 05:53:26.070246 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-k6ddc"] Nov 26 05:53:26 crc kubenswrapper[4871]: I1126 05:53:26.077781 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-kj54h"] Nov 26 05:53:26 crc kubenswrapper[4871]: I1126 05:53:26.084896 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-k6ddc"] Nov 26 05:53:26 crc kubenswrapper[4871]: I1126 05:53:26.092021 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-a517-account-create-update-zhft2"] Nov 26 05:53:26 crc kubenswrapper[4871]: I1126 05:53:26.523991 4871 
Nov 26 05:53:26 crc kubenswrapper[4871]: I1126 05:53:26.523991 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09f49677-5f6f-4144-9e29-4db96e4fcb1e" path="/var/lib/kubelet/pods/09f49677-5f6f-4144-9e29-4db96e4fcb1e/volumes"
Nov 26 05:53:26 crc kubenswrapper[4871]: I1126 05:53:26.524844 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4666a76-36db-4a3e-a12d-0cfb82284f7a" path="/var/lib/kubelet/pods/a4666a76-36db-4a3e-a12d-0cfb82284f7a/volumes"
Nov 26 05:53:26 crc kubenswrapper[4871]: I1126 05:53:26.525595 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d791f62b-80ce-4ea9-acdc-bfb288614bac" path="/var/lib/kubelet/pods/d791f62b-80ce-4ea9-acdc-bfb288614bac/volumes"
Nov 26 05:53:27 crc kubenswrapper[4871]: I1126 05:53:27.036344 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-6b92-account-create-update-hzv5c"]
Nov 26 05:53:27 crc kubenswrapper[4871]: I1126 05:53:27.047908 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-6b92-account-create-update-hzv5c"]
Nov 26 05:53:28 crc kubenswrapper[4871]: I1126 05:53:28.531962 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07bd5933-72a5-4f99-b5f9-08d8cadc77e8" path="/var/lib/kubelet/pods/07bd5933-72a5-4f99-b5f9-08d8cadc77e8/volumes"
Nov 26 05:53:30 crc kubenswrapper[4871]: I1126 05:53:30.506878 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff"
Nov 26 05:53:30 crc kubenswrapper[4871]: E1126 05:53:30.507574 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 05:53:31 crc kubenswrapper[4871]: I1126 05:53:31.061211 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-mw8tv"]
Nov 26 05:53:31 crc kubenswrapper[4871]: I1126 05:53:31.076031 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-ac3b-account-create-update-dt6ts"]
Nov 26 05:53:31 crc kubenswrapper[4871]: I1126 05:53:31.096793 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-hljkf"]
Nov 26 05:53:31 crc kubenswrapper[4871]: I1126 05:53:31.105655 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-7821-account-create-update-f5m7h"]
Nov 26 05:53:31 crc kubenswrapper[4871]: I1126 05:53:31.139395 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-mw8tv"]
Nov 26 05:53:31 crc kubenswrapper[4871]: I1126 05:53:31.146630 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-hljkf"]
Nov 26 05:53:31 crc kubenswrapper[4871]: I1126 05:53:31.156620 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-ac3b-account-create-update-dt6ts"]
Nov 26 05:53:31 crc kubenswrapper[4871]: I1126 05:53:31.166409 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-7821-account-create-update-f5m7h"]
Nov 26 05:53:32 crc kubenswrapper[4871]: I1126 05:53:32.529296 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0601d4ca-135d-4d81-87cf-13e178ed9660" path="/var/lib/kubelet/pods/0601d4ca-135d-4d81-87cf-13e178ed9660/volumes"
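The "Cleaned up orphaned pod volumes dir" entries come from the kubelet's periodic housekeeping pass: once a deleted pod's volumes are unmounted, the per-pod directory /var/lib/kubelet/pods/<podUID>/volumes is removed. A rough illustration of that scan (illustrative only, not the kubelet's actual implementation; the set of active UIDs is a hypothetical input):

import os

def orphaned_volume_dirs(pods_root="/var/lib/kubelet/pods", active_uids=frozenset()):
    # Yield per-pod volumes dirs whose pod UID the node no longer tracks;
    # these are the candidates the housekeeping pass would clean up.
    for uid in os.listdir(pods_root):
        volumes = os.path.join(pods_root, uid, "volumes")
        if uid not in active_uids and os.path.isdir(volumes):
            yield volumes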
Nov 26 05:53:32 crc kubenswrapper[4871]: I1126 05:53:32.530075 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25db4f6b-d9ba-4147-8f75-8283a144bc17" path="/var/lib/kubelet/pods/25db4f6b-d9ba-4147-8f75-8283a144bc17/volumes"
Nov 26 05:53:32 crc kubenswrapper[4871]: I1126 05:53:32.530791 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81b8252a-3d5b-4d62-9d38-c5e696bbe613" path="/var/lib/kubelet/pods/81b8252a-3d5b-4d62-9d38-c5e696bbe613/volumes"
Nov 26 05:53:32 crc kubenswrapper[4871]: I1126 05:53:32.531594 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9ce6687-ccf1-40e7-b1f8-b42502d5a149" path="/var/lib/kubelet/pods/a9ce6687-ccf1-40e7-b1f8-b42502d5a149/volumes"
Nov 26 05:53:34 crc kubenswrapper[4871]: I1126 05:53:34.864470 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 26 05:53:34 crc kubenswrapper[4871]: I1126 05:53:34.867014 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 26 05:53:34 crc kubenswrapper[4871]: I1126 05:53:34.871846 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 26 05:53:34 crc kubenswrapper[4871]: I1126 05:53:34.873256 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 26 05:53:34 crc kubenswrapper[4871]: I1126 05:53:34.885816 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 26 05:53:34 crc kubenswrapper[4871]: I1126 05:53:34.927487 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7d1efa4c-d003-4a94-bbc8-d29969d96adb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 26 05:53:34 crc kubenswrapper[4871]: I1126 05:53:34.927756 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7d1efa4c-d003-4a94-bbc8-d29969d96adb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 26 05:53:35 crc kubenswrapper[4871]: I1126 05:53:35.029396 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7d1efa4c-d003-4a94-bbc8-d29969d96adb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 26 05:53:35 crc kubenswrapper[4871]: I1126 05:53:35.030057 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7d1efa4c-d003-4a94-bbc8-d29969d96adb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 26 05:53:35 crc kubenswrapper[4871]: I1126 05:53:35.030093 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"7d1efa4c-d003-4a94-bbc8-d29969d96adb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 26 05:53:35 crc kubenswrapper[4871]: I1126 05:53:35.068164 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"7d1efa4c-d003-4a94-bbc8-d29969d96adb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 26 05:53:35 crc kubenswrapper[4871]: I1126 05:53:35.207301 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 26 05:53:35 crc kubenswrapper[4871]: I1126 05:53:35.711834 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 26 05:53:36 crc kubenswrapper[4871]: I1126 05:53:36.590468 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"7d1efa4c-d003-4a94-bbc8-d29969d96adb","Type":"ContainerStarted","Data":"eb1fcedd2b666d0d601b78fb02f208eed7b78e30cb8e1192045798ee35d34e92"}
Nov 26 05:53:36 crc kubenswrapper[4871]: I1126 05:53:36.590517 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"7d1efa4c-d003-4a94-bbc8-d29969d96adb","Type":"ContainerStarted","Data":"96d722c1c8c4838f8adc39652075b6a3f356e94d525200451c58dd4b159c936e"}
Nov 26 05:53:36 crc kubenswrapper[4871]: I1126 05:53:36.611333 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=2.611303528 podStartE2EDuration="2.611303528s" podCreationTimestamp="2025-11-26 05:53:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:53:36.604429597 +0000 UTC m=+1674.787481223" watchObservedRunningTime="2025-11-26 05:53:36.611303528 +0000 UTC m=+1674.794355124"
Nov 26 05:53:37 crc kubenswrapper[4871]: I1126 05:53:37.032121 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-2mr78"]
Nov 26 05:53:37 crc kubenswrapper[4871]: I1126 05:53:37.040284 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-2mr78"]
Nov 26 05:53:37 crc kubenswrapper[4871]: I1126 05:53:37.602761 4871 generic.go:334] "Generic (PLEG): container finished" podID="7d1efa4c-d003-4a94-bbc8-d29969d96adb" containerID="eb1fcedd2b666d0d601b78fb02f208eed7b78e30cb8e1192045798ee35d34e92" exitCode=0
Nov 26 05:53:37 crc kubenswrapper[4871]: I1126 05:53:37.602824 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"7d1efa4c-d003-4a94-bbc8-d29969d96adb","Type":"ContainerDied","Data":"eb1fcedd2b666d0d601b78fb02f208eed7b78e30cb8e1192045798ee35d34e92"}
Nov 26 05:53:38 crc kubenswrapper[4871]: I1126 05:53:38.532743 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="136e68fc-176f-4240-9876-53e81cc4caab" path="/var/lib/kubelet/pods/136e68fc-176f-4240-9876-53e81cc4caab/volumes"
Nov 26 05:53:39 crc kubenswrapper[4871]: I1126 05:53:39.075067 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 26 05:53:39 crc kubenswrapper[4871]: I1126 05:53:39.157032 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kubelet-dir\") pod \"7d1efa4c-d003-4a94-bbc8-d29969d96adb\" (UID: \"7d1efa4c-d003-4a94-bbc8-d29969d96adb\") "
Nov 26 05:53:39 crc kubenswrapper[4871]: I1126 05:53:39.157158 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7d1efa4c-d003-4a94-bbc8-d29969d96adb" (UID: "7d1efa4c-d003-4a94-bbc8-d29969d96adb"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 05:53:39 crc kubenswrapper[4871]: I1126 05:53:39.157174 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kube-api-access\") pod \"7d1efa4c-d003-4a94-bbc8-d29969d96adb\" (UID: \"7d1efa4c-d003-4a94-bbc8-d29969d96adb\") "
Nov 26 05:53:39 crc kubenswrapper[4871]: I1126 05:53:39.158351 4871 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 26 05:53:39 crc kubenswrapper[4871]: I1126 05:53:39.163503 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7d1efa4c-d003-4a94-bbc8-d29969d96adb" (UID: "7d1efa4c-d003-4a94-bbc8-d29969d96adb"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:53:39 crc kubenswrapper[4871]: I1126 05:53:39.260412 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d1efa4c-d003-4a94-bbc8-d29969d96adb-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 26 05:53:39 crc kubenswrapper[4871]: I1126 05:53:39.638128 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"7d1efa4c-d003-4a94-bbc8-d29969d96adb","Type":"ContainerDied","Data":"96d722c1c8c4838f8adc39652075b6a3f356e94d525200451c58dd4b159c936e"}
Nov 26 05:53:39 crc kubenswrapper[4871]: I1126 05:53:39.638174 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96d722c1c8c4838f8adc39652075b6a3f356e94d525200451c58dd4b159c936e"
Nov 26 05:53:39 crc kubenswrapper[4871]: I1126 05:53:39.638265 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.060397 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 26 05:53:41 crc kubenswrapper[4871]: E1126 05:53:41.061288 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d1efa4c-d003-4a94-bbc8-d29969d96adb" containerName="pruner"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.061318 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d1efa4c-d003-4a94-bbc8-d29969d96adb" containerName="pruner"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.061756 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d1efa4c-d003-4a94-bbc8-d29969d96adb" containerName="pruner"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.063047 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.067951 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.069509 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.080617 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.203320 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/283281c5-37d5-4b0f-9824-13ffec29ddaf-var-lock\") pod \"installer-9-crc\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.203416 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/283281c5-37d5-4b0f-9824-13ffec29ddaf-kubelet-dir\") pod \"installer-9-crc\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.203467 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/283281c5-37d5-4b0f-9824-13ffec29ddaf-kube-api-access\") pod \"installer-9-crc\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.305877 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/283281c5-37d5-4b0f-9824-13ffec29ddaf-var-lock\") pod \"installer-9-crc\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.305992 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/283281c5-37d5-4b0f-9824-13ffec29ddaf-kubelet-dir\") pod \"installer-9-crc\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.306039 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/283281c5-37d5-4b0f-9824-13ffec29ddaf-kubelet-dir\") pod \"installer-9-crc\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.306049 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/283281c5-37d5-4b0f-9824-13ffec29ddaf-kube-api-access\") pod \"installer-9-crc\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.305990 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/283281c5-37d5-4b0f-9824-13ffec29ddaf-var-lock\") pod \"installer-9-crc\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.338815 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/283281c5-37d5-4b0f-9824-13ffec29ddaf-kube-api-access\") pod \"installer-9-crc\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.429644 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:53:41 crc kubenswrapper[4871]: I1126 05:53:41.951906 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 26 05:53:41 crc kubenswrapper[4871]: W1126 05:53:41.962286 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod283281c5_37d5_4b0f_9824_13ffec29ddaf.slice/crio-d7f2de629768d9f2fc0d90c01833a039e5092be703b3bb44327382a85d4dd9bf WatchSource:0}: Error finding container d7f2de629768d9f2fc0d90c01833a039e5092be703b3bb44327382a85d4dd9bf: Status 404 returned error can't find the container with id d7f2de629768d9f2fc0d90c01833a039e5092be703b3bb44327382a85d4dd9bf
Nov 26 05:53:42 crc kubenswrapper[4871]: I1126 05:53:42.523326 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff"
Nov 26 05:53:42 crc kubenswrapper[4871]: E1126 05:53:42.524106 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 05:53:42 crc kubenswrapper[4871]: I1126 05:53:42.668722 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"283281c5-37d5-4b0f-9824-13ffec29ddaf","Type":"ContainerStarted","Data":"38d8b39c43544325db12a74f5908844927db68c70d27a58b69fde34327d88bc3"}
Nov 26 05:53:42 crc kubenswrapper[4871]: I1126 05:53:42.668790 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"283281c5-37d5-4b0f-9824-13ffec29ddaf","Type":"ContainerStarted","Data":"d7f2de629768d9f2fc0d90c01833a039e5092be703b3bb44327382a85d4dd9bf"}
Nov 26 05:53:42 crc kubenswrapper[4871]: I1126 05:53:42.692111 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=1.6920889099999998 podStartE2EDuration="1.69208891s" podCreationTimestamp="2025-11-26 05:53:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:53:42.686174294 +0000 UTC m=+1680.869225930" watchObservedRunningTime="2025-11-26 05:53:42.69208891 +0000 UTC m=+1680.875140496"
Nov 26 05:53:43 crc kubenswrapper[4871]: I1126 05:53:43.035731 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-sync-gggp7"]
Nov 26 05:53:43 crc kubenswrapper[4871]: I1126 05:53:43.047496 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-sync-gggp7"]
Nov 26 05:53:44 crc kubenswrapper[4871]: I1126 05:53:44.520101 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e893829-69cb-4a4f-9b97-5b96332e5724" path="/var/lib/kubelet/pods/1e893829-69cb-4a4f-9b97-5b96332e5724/volumes"
Nov 26 05:53:56 crc kubenswrapper[4871]: I1126 05:53:56.507776 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff"
Nov 26 05:53:56 crc kubenswrapper[4871]: E1126 05:53:56.508663 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 05:54:08 crc kubenswrapper[4871]: I1126 05:54:08.508098 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff"
Nov 26 05:54:08 crc kubenswrapper[4871]: E1126 05:54:08.509394 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 05:54:14 crc kubenswrapper[4871]: I1126 05:54:14.054219 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-kvkr5"]
Nov 26 05:54:14 crc kubenswrapper[4871]: I1126 05:54:14.063684 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-kvkr5"]
Nov 26 05:54:14 crc kubenswrapper[4871]: I1126 05:54:14.530016 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="977ad0ca-daf1-4b9d-b75a-c697ff3239c2" path="/var/lib/kubelet/pods/977ad0ca-daf1-4b9d-b75a-c697ff3239c2/volumes"
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.359651 4871 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.361769 4871 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.361893 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.362086 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d" gracePeriod=15
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.362207 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934" gracePeriod=15
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.362219 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51" gracePeriod=15
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.362227 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d" gracePeriod=15
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.362250 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438" gracePeriod=15
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.363063 4871 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 26 05:54:20 crc kubenswrapper[4871]: E1126 05:54:20.363748 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364095 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 26 05:54:20 crc kubenswrapper[4871]: E1126 05:54:20.364118 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364131 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 26 05:54:20 crc kubenswrapper[4871]: E1126 05:54:20.364190 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364202 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
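The five "Killing container with a grace period" entries above are the old kube-apiserver static pod being torn down after its manifest was replaced (SyncLoop REMOVE source="file" followed by ADD for the new revision): each container is signalled and given gracePeriod=15 seconds to exit before a forced kill. A generic sketch of that term-then-kill pattern for a local process, not kubelet or CRI-O code, just the standard SIGTERM/SIGKILL semantics the log implies:

import signal
import subprocess

def kill_with_grace(proc: subprocess.Popen, grace_period: float = 15.0) -> int:
    proc.send_signal(signal.SIGTERM)      # polite request to shut down
    try:
        return proc.wait(timeout=grace_period)
    except subprocess.TimeoutExpired:
        proc.kill()                       # SIGKILL once the grace period expires
        return proc.wait()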
containerName="kube-apiserver" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364244 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 05:54:20 crc kubenswrapper[4871]: E1126 05:54:20.364261 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364272 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 05:54:20 crc kubenswrapper[4871]: E1126 05:54:20.364295 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364306 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364656 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364695 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364740 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364768 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364785 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.364816 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 05:54:20 crc kubenswrapper[4871]: E1126 05:54:20.365178 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.365205 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 05:54:20 crc kubenswrapper[4871]: E1126 05:54:20.407835 4871 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.44:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.529191 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 
05:54:20.529257 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.529322 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.529372 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.529435 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.529467 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.529538 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.529560 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.630996 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631038 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:20 crc 
kubenswrapper[4871]: I1126 05:54:20.631071 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631151 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631190 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631186 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631313 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631415 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631507 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631663 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631697 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631747 4871 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631769 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631846 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.631874 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.632002 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.708968 4871 util.go:30] "No sandbox for pod can be found. 
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.708968 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 26 05:54:20 crc kubenswrapper[4871]: E1126 05:54:20.743805 4871 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.44:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b78c3a5de5adc openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 05:54:20.742449884 +0000 UTC m=+1718.925501470,LastTimestamp:2025-11-26 05:54:20.742449884 +0000 UTC m=+1718.925501470,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.858186 4871 scope.go:117] "RemoveContainer" containerID="1e348798dd5b55775761ee4b70f95d158dab764cbc5777e9a0943efa94ba5558"
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.918889 4871 scope.go:117] "RemoveContainer" containerID="03c2a2301d01ead888510ff8d480fc972954ab66ec9f544fc5751c2dc780b749"
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.973913 4871 scope.go:117] "RemoveContainer" containerID="af3229e3f6d0be7e177777eb582543ac0a1185b964b940f73e9f713551687602"
Nov 26 05:54:20 crc kubenswrapper[4871]: I1126 05:54:20.996965 4871 scope.go:117] "RemoveContainer" containerID="e3869b59a5122fe45ea941c46900c5b12caffa842755e106eed0ba7db87cb2c0"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.020249 4871 scope.go:117] "RemoveContainer" containerID="17b089a238476abee8212d46ffff81ee90b084d8f9639eac7cb698c4d51e9c5a"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.048268 4871 scope.go:117] "RemoveContainer" containerID="33fd2a391d54152eaf8b7e3b1456de56fe793bd89ee5268bbef8736bf39c15d6"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.083060 4871 scope.go:117] "RemoveContainer" containerID="4cad5d60e79ce469f85069056ad86015e022a74a277ba0f87ec936c22221c73a"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.124011 4871 scope.go:117] "RemoveContainer" containerID="3cb3de7e18022b03f015c5b5076cb8bea79f1cf86da013952f62066c36a38cce"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.128702 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.132300 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.133512 4871 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d" exitCode=0
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.133566 4871 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438" exitCode=0
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.133582 4871 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51" exitCode=0
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.133595 4871 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934" exitCode=2
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.133607 4871 scope.go:117] "RemoveContainer" containerID="7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.136366 4871 generic.go:334] "Generic (PLEG): container finished" podID="283281c5-37d5-4b0f-9824-13ffec29ddaf" containerID="38d8b39c43544325db12a74f5908844927db68c70d27a58b69fde34327d88bc3" exitCode=0
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.136446 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"283281c5-37d5-4b0f-9824-13ffec29ddaf","Type":"ContainerDied","Data":"38d8b39c43544325db12a74f5908844927db68c70d27a58b69fde34327d88bc3"}
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.137409 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.141254 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"e556fb7a67a2ab3468f23c5b9e78689aa947b6b257e9076049c6bc73006a10b1"}
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.141302 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"6eb42a8c6847c8a24277cfafc802406eaf32ea157a3237f19e8db832af73bc23"}
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.141969 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused"
Nov 26 05:54:21 crc kubenswrapper[4871]: E1126 05:54:21.142687 4871 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.44:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.144385 4871 scope.go:117] "RemoveContainer" containerID="0d7d4bcebf3e501276514ff5ba7838d4951fc3f90c68c7d6b24ffd745bdf19a6"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.201493 4871 scope.go:117] "RemoveContainer" containerID="7242948d12d8d24c3c33d7ef9950db597acaf4fa0d6bd5680c5d296e0da528f0"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.254695 4871 scope.go:117] "RemoveContainer" containerID="1f54bb2588599460bb6ce0b0e2f42fabe04e0ac3b77d40b2fd98f9f5b76dd4cf"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.286276 4871 scope.go:117] "RemoveContainer" containerID="7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea"
Nov 26 05:54:21 crc kubenswrapper[4871]: E1126 05:54:21.286839 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\": container with ID starting with 7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea not found: ID does not exist" containerID="7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea"
Nov 26 05:54:21 crc kubenswrapper[4871]: E1126 05:54:21.286878 4871 kuberuntime_gc.go:150] "Failed to remove container" err="failed to get container status \"7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\": rpc error: code = NotFound desc = could not find container \"7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea\": container with ID starting with 7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea not found: ID does not exist" containerID="7d0d390f361672d7b37f42cfc918b64aca7ad8dc0a8714d0a715efe1622dfbea"
Nov 26 05:54:21 crc kubenswrapper[4871]: I1126 05:54:21.510024 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff"
Nov 26 05:54:21 crc kubenswrapper[4871]: E1126 05:54:21.510610 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 05:54:21 crc kubenswrapper[4871]: E1126 05:54:21.579586 4871 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/glance-glance-default-external-api-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/glance-glance-default-external-api-0\": dial tcp 38.102.83.44:6443: connect: connection refused" pod="openstack/glance-default-external-api-0" volumeName="glance"
Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.165864 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.543909 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused"
Nov 26 05:54:22 crc kubenswrapper[4871]: E1126 05:54:22.672335 4871 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused"
Nov 26 05:54:22 crc kubenswrapper[4871]: E1126 05:54:22.672800 4871 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused"
Nov 26 05:54:22 crc kubenswrapper[4871]: E1126 05:54:22.673270 4871 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused"
Nov 26 05:54:22 crc kubenswrapper[4871]: E1126 05:54:22.673705 4871 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused"
Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.673920 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 26 05:54:22 crc kubenswrapper[4871]: E1126 05:54:22.674111 4871 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused"
Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.674138 4871 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Nov 26 05:54:22 crc kubenswrapper[4871]: E1126 05:54:22.674376 4871 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused" interval="200ms"
Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.674780 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused"
Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.816644 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/283281c5-37d5-4b0f-9824-13ffec29ddaf-kubelet-dir\") pod \"283281c5-37d5-4b0f-9824-13ffec29ddaf\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") "
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.817057 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/283281c5-37d5-4b0f-9824-13ffec29ddaf-kube-api-access\") pod \"283281c5-37d5-4b0f-9824-13ffec29ddaf\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") " Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.817085 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/283281c5-37d5-4b0f-9824-13ffec29ddaf-var-lock\") pod \"283281c5-37d5-4b0f-9824-13ffec29ddaf\" (UID: \"283281c5-37d5-4b0f-9824-13ffec29ddaf\") " Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.817491 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/283281c5-37d5-4b0f-9824-13ffec29ddaf-var-lock" (OuterVolumeSpecName: "var-lock") pod "283281c5-37d5-4b0f-9824-13ffec29ddaf" (UID: "283281c5-37d5-4b0f-9824-13ffec29ddaf"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.818014 4871 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/283281c5-37d5-4b0f-9824-13ffec29ddaf-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.818045 4871 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/283281c5-37d5-4b0f-9824-13ffec29ddaf-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.834412 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/283281c5-37d5-4b0f-9824-13ffec29ddaf-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "283281c5-37d5-4b0f-9824-13ffec29ddaf" (UID: "283281c5-37d5-4b0f-9824-13ffec29ddaf"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:54:22 crc kubenswrapper[4871]: E1126 05:54:22.877147 4871 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused" interval="400ms" Nov 26 05:54:22 crc kubenswrapper[4871]: I1126 05:54:22.921330 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/283281c5-37d5-4b0f-9824-13ffec29ddaf-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.035596 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.037360 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.038120 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.038502 4871 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.183818 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.185287 4871 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d" exitCode=0 Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.185381 4871 scope.go:117] "RemoveContainer" containerID="284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.185510 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.188355 4871 generic.go:334] "Generic (PLEG): container finished" podID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" containerID="d5a407a77ad3856b114df74d546b4fe2462c120753c82e462496259e4b6725e7" exitCode=0 Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.188443 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" event={"ID":"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa","Type":"ContainerDied","Data":"d5a407a77ad3856b114df74d546b4fe2462c120753c82e462496259e4b6725e7"} Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.189292 4871 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.189859 4871 status_manager.go:851] "Failed to get status for pod" podUID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.190359 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 
05:54:23.192286 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"283281c5-37d5-4b0f-9824-13ffec29ddaf","Type":"ContainerDied","Data":"d7f2de629768d9f2fc0d90c01833a039e5092be703b3bb44327382a85d4dd9bf"} Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.192322 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d7f2de629768d9f2fc0d90c01833a039e5092be703b3bb44327382a85d4dd9bf" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.192367 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.210655 4871 status_manager.go:851] "Failed to get status for pod" podUID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.211238 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.211768 4871 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.226875 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.227211 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.227256 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.227354 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.227870 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.227935 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.228564 4871 scope.go:117] "RemoveContainer" containerID="5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.229702 4871 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.229851 4871 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.229972 4871 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.255216 4871 scope.go:117] "RemoveContainer" containerID="60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51" Nov 26 05:54:23 crc kubenswrapper[4871]: E1126 05:54:23.278145 4871 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused" interval="800ms" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.286816 4871 scope.go:117] "RemoveContainer" containerID="38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.317074 4871 scope.go:117] "RemoveContainer" containerID="ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.347989 4871 scope.go:117] "RemoveContainer" containerID="30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.400380 4871 scope.go:117] "RemoveContainer" containerID="284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d" Nov 26 05:54:23 crc kubenswrapper[4871]: E1126 05:54:23.403406 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\": container with ID starting with 284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d not found: ID does not exist" containerID="284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.403617 4871 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d"} err="failed to get container status \"284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\": rpc error: code = NotFound desc = could not find container \"284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d\": container with ID starting with 284812a42eceaf03a2bfb74c4985ea18de33c40ff56025dc0c449f51a9aa288d not found: ID does not exist" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.403676 4871 scope.go:117] "RemoveContainer" containerID="5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438" Nov 26 05:54:23 crc kubenswrapper[4871]: E1126 05:54:23.406575 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\": container with ID starting with 5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438 not found: ID does not exist" containerID="5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.406739 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438"} err="failed to get container status \"5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\": rpc error: code = NotFound desc = could not find container \"5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438\": container with ID starting with 5e4fa7b2553393e0a69a0f18af7b16f61c7f68778f12eb386aee2b9c9ea32438 not found: ID does not exist" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.406852 4871 scope.go:117] "RemoveContainer" containerID="60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51" Nov 26 05:54:23 crc kubenswrapper[4871]: E1126 05:54:23.407324 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\": container with ID starting with 60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51 not found: ID does not exist" containerID="60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.407397 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51"} err="failed to get container status \"60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\": rpc error: code = NotFound desc = could not find container \"60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51\": container with ID starting with 60fa815f0991aa6d80eadd38928e2b0243f89be684dfc6f0163368c49d294b51 not found: ID does not exist" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.407435 4871 scope.go:117] "RemoveContainer" containerID="38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934" Nov 26 05:54:23 crc kubenswrapper[4871]: E1126 05:54:23.407796 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\": container with ID starting with 38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934 not found: ID does not exist" 
containerID="38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.407827 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934"} err="failed to get container status \"38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\": rpc error: code = NotFound desc = could not find container \"38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934\": container with ID starting with 38cd5bcb23b7042147b770e1f449cb265f021bef3b04ffd4ad726409c5424934 not found: ID does not exist" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.407845 4871 scope.go:117] "RemoveContainer" containerID="ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d" Nov 26 05:54:23 crc kubenswrapper[4871]: E1126 05:54:23.408440 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\": container with ID starting with ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d not found: ID does not exist" containerID="ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.408572 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d"} err="failed to get container status \"ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\": rpc error: code = NotFound desc = could not find container \"ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d\": container with ID starting with ff3455b6ad9f6e5d8ea1fadde5764afcf8d9425491d00477b23d1910b21d9f1d not found: ID does not exist" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.408657 4871 scope.go:117] "RemoveContainer" containerID="30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4" Nov 26 05:54:23 crc kubenswrapper[4871]: E1126 05:54:23.409185 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\": container with ID starting with 30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4 not found: ID does not exist" containerID="30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.409227 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4"} err="failed to get container status \"30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\": rpc error: code = NotFound desc = could not find container \"30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4\": container with ID starting with 30355d4198ff7d5248a15995e63efb618e4c64a95b5ed492c46377b7b095a2b4 not found: ID does not exist" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.516251 4871 status_manager.go:851] "Failed to get status for pod" podUID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\": dial tcp 38.102.83.44:6443: 
connect: connection refused" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.516694 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:23 crc kubenswrapper[4871]: I1126 05:54:23.517360 4871 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:24 crc kubenswrapper[4871]: E1126 05:54:24.079175 4871 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused" interval="1.6s" Nov 26 05:54:24 crc kubenswrapper[4871]: I1126 05:54:24.527322 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 26 05:54:25 crc kubenswrapper[4871]: E1126 05:54:25.680400 4871 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused" interval="3.2s" Nov 26 05:54:25 crc kubenswrapper[4871]: E1126 05:54:25.861571 4871 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.44:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b78c3a5de5adc openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 05:54:20.742449884 +0000 UTC m=+1718.925501470,LastTimestamp:2025-11-26 05:54:20.742449884 +0000 UTC m=+1718.925501470,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 05:54:27 crc kubenswrapper[4871]: E1126 05:54:27.542898 4871 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/prometheus-metric-storage-db-prometheus-metric-storage-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/prometheus-metric-storage-db-prometheus-metric-storage-0\": dial tcp 38.102.83.44:6443: connect: connection refused" pod="openstack/prometheus-metric-storage-0" volumeName="prometheus-metric-storage-db" Nov 26 05:54:28 crc kubenswrapper[4871]: E1126 05:54:28.881800 4871 controller.go:145] 
"Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.44:6443: connect: connection refused" interval="6.4s" Nov 26 05:54:29 crc kubenswrapper[4871]: I1126 05:54:29.066154 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="e20fd17b-5b64-4272-9876-347ea057aa04" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 05:54:29 crc kubenswrapper[4871]: E1126 05:54:29.589168 4871 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/persistence-rabbitmq-cell1-server-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/persistence-rabbitmq-cell1-server-0\": dial tcp 38.102.83.44:6443: connect: connection refused" pod="openstack/rabbitmq-cell1-server-0" volumeName="persistence" Nov 26 05:54:31 crc kubenswrapper[4871]: I1126 05:54:31.506501 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:31 crc kubenswrapper[4871]: I1126 05:54:31.508006 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:31 crc kubenswrapper[4871]: I1126 05:54:31.508856 4871 status_manager.go:851] "Failed to get status for pod" podUID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:31 crc kubenswrapper[4871]: I1126 05:54:31.528673 4871 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8a2d2c7b-d0d7-40ac-b144-caf1cefe0993" Nov 26 05:54:31 crc kubenswrapper[4871]: I1126 05:54:31.528707 4871 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8a2d2c7b-d0d7-40ac-b144-caf1cefe0993" Nov 26 05:54:31 crc kubenswrapper[4871]: E1126 05:54:31.529112 4871 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:31 crc kubenswrapper[4871]: I1126 05:54:31.530236 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.297864 4871 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="fd0f2cc96acdb62128f6c719301188451f7c42337de83590af4a1104bcd49004" exitCode=0 Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.297972 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"fd0f2cc96acdb62128f6c719301188451f7c42337de83590af4a1104bcd49004"} Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.298451 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"07a4b9ab53af387c3267763db4038351e0e95730377112fc9dde541c7342abd4"} Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.298878 4871 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8a2d2c7b-d0d7-40ac-b144-caf1cefe0993" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.298910 4871 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8a2d2c7b-d0d7-40ac-b144-caf1cefe0993" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.299377 4871 status_manager.go:851] "Failed to get status for pod" podUID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: E1126 05:54:32.299402 4871 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.299730 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.301685 4871 generic.go:334] "Generic (PLEG): container finished" podID="0f2d5628-2ad3-400c-bc77-b0251683a83a" containerID="cdc9c87da2071f51195db8368cea3faabab605548e3e0bd0a674606a9811ca20" exitCode=1 Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.301767 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" event={"ID":"0f2d5628-2ad3-400c-bc77-b0251683a83a","Type":"ContainerDied","Data":"cdc9c87da2071f51195db8368cea3faabab605548e3e0bd0a674606a9811ca20"} Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.302243 4871 scope.go:117] "RemoveContainer" containerID="cdc9c87da2071f51195db8368cea3faabab605548e3e0bd0a674606a9811ca20" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.302675 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.304216 4871 status_manager.go:851] "Failed to get status for pod" podUID="0f2d5628-2ad3-400c-bc77-b0251683a83a" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-645b9949f7-48k8g\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.304599 4871 status_manager.go:851] "Failed to get status for pod" podUID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.305772 4871 generic.go:334] "Generic (PLEG): container finished" podID="51410db5-d309-4625-8f36-02cf8f0ba419" containerID="d834d7cdfd9b843260c63f2c182c2499ca4b36944bbbbf70f194c781a4cf63db" exitCode=1 Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.305825 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" event={"ID":"51410db5-d309-4625-8f36-02cf8f0ba419","Type":"ContainerDied","Data":"d834d7cdfd9b843260c63f2c182c2499ca4b36944bbbbf70f194c781a4cf63db"} Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.306499 4871 scope.go:117] "RemoveContainer" containerID="d834d7cdfd9b843260c63f2c182c2499ca4b36944bbbbf70f194c781a4cf63db" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.306696 4871 status_manager.go:851] "Failed to get status for pod" podUID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.307226 4871 status_manager.go:851] "Failed to get status for pod" podUID="51410db5-d309-4625-8f36-02cf8f0ba419" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-lzsqj\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.307580 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.307893 4871 status_manager.go:851] "Failed to get status for pod" podUID="0f2d5628-2ad3-400c-bc77-b0251683a83a" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-645b9949f7-48k8g\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: 
I1126 05:54:32.529933 4871 status_manager.go:851] "Failed to get status for pod" podUID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/pods/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.530927 4871 status_manager.go:851] "Failed to get status for pod" podUID="51410db5-d309-4625-8f36-02cf8f0ba419" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-lzsqj\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.533020 4871 status_manager.go:851] "Failed to get status for pod" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.533336 4871 status_manager.go:851] "Failed to get status for pod" podUID="0f2d5628-2ad3-400c-bc77-b0251683a83a" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-645b9949f7-48k8g\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:32 crc kubenswrapper[4871]: I1126 05:54:32.533604 4871 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.44:6443: connect: connection refused" Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.324346 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"37d9e0f2062091caca31445d5f164405c840fcdd63934f7426bb5320c9a0517b"} Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.324649 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d73b7f4abfbec31257e9103c430ccae1a3a955e09e3a3015c130ae3706ec83af"} Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.324660 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"1cd64b9b86d70653f2d0f2b3af3ac38327670d200fd560ebbc359f0f9033f77e"} Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.328963 4871 generic.go:334] "Generic (PLEG): container finished" podID="0f2d5628-2ad3-400c-bc77-b0251683a83a" containerID="caa2e48a4dc902ee736d46b8875c4cfa81b4a6939a7821fc7a8e1c8d411fe383" exitCode=1 Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.329023 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" 
event={"ID":"0f2d5628-2ad3-400c-bc77-b0251683a83a","Type":"ContainerDied","Data":"caa2e48a4dc902ee736d46b8875c4cfa81b4a6939a7821fc7a8e1c8d411fe383"} Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.329047 4871 scope.go:117] "RemoveContainer" containerID="cdc9c87da2071f51195db8368cea3faabab605548e3e0bd0a674606a9811ca20" Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.330040 4871 scope.go:117] "RemoveContainer" containerID="caa2e48a4dc902ee736d46b8875c4cfa81b4a6939a7821fc7a8e1c8d411fe383" Nov 26 05:54:33 crc kubenswrapper[4871]: E1126 05:54:33.330469 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-645b9949f7-48k8g_metallb-system(0f2d5628-2ad3-400c-bc77-b0251683a83a)\"" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" podUID="0f2d5628-2ad3-400c-bc77-b0251683a83a" Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.334719 4871 generic.go:334] "Generic (PLEG): container finished" podID="51410db5-d309-4625-8f36-02cf8f0ba419" containerID="53e982c8e34d28ea9e2fa9a75c1f02dbb078d4325b8653ab41ffd5e5af9f25be" exitCode=1 Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.334819 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" event={"ID":"51410db5-d309-4625-8f36-02cf8f0ba419","Type":"ContainerDied","Data":"53e982c8e34d28ea9e2fa9a75c1f02dbb078d4325b8653ab41ffd5e5af9f25be"} Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.335649 4871 scope.go:117] "RemoveContainer" containerID="53e982c8e34d28ea9e2fa9a75c1f02dbb078d4325b8653ab41ffd5e5af9f25be" Nov 26 05:54:33 crc kubenswrapper[4871]: E1126 05:54:33.336041 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-lzsqj_openstack-operators(51410db5-d309-4625-8f36-02cf8f0ba419)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" podUID="51410db5-d309-4625-8f36-02cf8f0ba419" Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.341714 4871 generic.go:334] "Generic (PLEG): container finished" podID="33ba2b4e-6239-43c0-a694-6495b7ae2ba3" containerID="3e09aa8d4f0ef9000a5805f557fd681cb2f2582e5164380cadb9ccbd2bdb372b" exitCode=1 Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.341779 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" event={"ID":"33ba2b4e-6239-43c0-a694-6495b7ae2ba3","Type":"ContainerDied","Data":"3e09aa8d4f0ef9000a5805f557fd681cb2f2582e5164380cadb9ccbd2bdb372b"} Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.345567 4871 scope.go:117] "RemoveContainer" containerID="3e09aa8d4f0ef9000a5805f557fd681cb2f2582e5164380cadb9ccbd2bdb372b" Nov 26 05:54:33 crc kubenswrapper[4871]: I1126 05:54:33.427992 4871 scope.go:117] "RemoveContainer" containerID="d834d7cdfd9b843260c63f2c182c2499ca4b36944bbbbf70f194c781a4cf63db" Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.352105 4871 generic.go:334] "Generic (PLEG): container finished" podID="33ba2b4e-6239-43c0-a694-6495b7ae2ba3" containerID="d5abb2da6d5d4dbb1d73ea36dde21af7e95ad0c3292da57bbc0125a7eb754783" exitCode=1 Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.352191 4871 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" event={"ID":"33ba2b4e-6239-43c0-a694-6495b7ae2ba3","Type":"ContainerDied","Data":"d5abb2da6d5d4dbb1d73ea36dde21af7e95ad0c3292da57bbc0125a7eb754783"} Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.352413 4871 scope.go:117] "RemoveContainer" containerID="3e09aa8d4f0ef9000a5805f557fd681cb2f2582e5164380cadb9ccbd2bdb372b" Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.353476 4871 scope.go:117] "RemoveContainer" containerID="d5abb2da6d5d4dbb1d73ea36dde21af7e95ad0c3292da57bbc0125a7eb754783" Nov 26 05:54:34 crc kubenswrapper[4871]: E1126 05:54:34.353805 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-9lvtk_openstack-operators(33ba2b4e-6239-43c0-a694-6495b7ae2ba3)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" podUID="33ba2b4e-6239-43c0-a694-6495b7ae2ba3" Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.358771 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c7bdd2825ca9950b6cc0e510b21f44ac713718a4f6dc649e1161f371e6dd539d"} Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.358904 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"1e59052074e8db45e507211f07b053758d62b2a79d2adccd4897dd2e1dbe01bb"} Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.359018 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.359220 4871 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8a2d2c7b-d0d7-40ac-b144-caf1cefe0993" Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.359244 4871 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8a2d2c7b-d0d7-40ac-b144-caf1cefe0993" Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.378046 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.378110 4871 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f" exitCode=1 Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.378138 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f"} Nov 26 05:54:34 crc kubenswrapper[4871]: I1126 05:54:34.378885 4871 scope.go:117] "RemoveContainer" containerID="2ff2774ab515ecfe765901658de984e392610eec25f8cf7f49a808969ede966f" Nov 26 05:54:35 crc kubenswrapper[4871]: I1126 05:54:35.389540 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 26 05:54:35 crc kubenswrapper[4871]: I1126 05:54:35.389938 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"eae6d1b48552b9c6c275e36fcb749088a62c1276b14ef2772f766fde73ce26df"} Nov 26 05:54:35 crc kubenswrapper[4871]: I1126 05:54:35.526497 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" Nov 26 05:54:35 crc kubenswrapper[4871]: I1126 05:54:35.526853 4871 scope.go:117] "RemoveContainer" containerID="53e982c8e34d28ea9e2fa9a75c1f02dbb078d4325b8653ab41ffd5e5af9f25be" Nov 26 05:54:35 crc kubenswrapper[4871]: E1126 05:54:35.527053 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-lzsqj_openstack-operators(51410db5-d309-4625-8f36-02cf8f0ba419)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" podUID="51410db5-d309-4625-8f36-02cf8f0ba419" Nov 26 05:54:35 crc kubenswrapper[4871]: I1126 05:54:35.675038 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" Nov 26 05:54:35 crc kubenswrapper[4871]: I1126 05:54:35.675893 4871 scope.go:117] "RemoveContainer" containerID="d5abb2da6d5d4dbb1d73ea36dde21af7e95ad0c3292da57bbc0125a7eb754783" Nov 26 05:54:35 crc kubenswrapper[4871]: E1126 05:54:35.676180 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-9lvtk_openstack-operators(33ba2b4e-6239-43c0-a694-6495b7ae2ba3)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" podUID="33ba2b4e-6239-43c0-a694-6495b7ae2ba3" Nov 26 05:54:36 crc kubenswrapper[4871]: I1126 05:54:36.510449 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:54:36 crc kubenswrapper[4871]: E1126 05:54:36.510835 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:54:36 crc kubenswrapper[4871]: I1126 05:54:36.533941 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:36 crc kubenswrapper[4871]: I1126 05:54:36.534366 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:36 crc kubenswrapper[4871]: I1126 05:54:36.543113 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:37 crc kubenswrapper[4871]: I1126 05:54:37.554683 4871 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:54:37 crc kubenswrapper[4871]: I1126 05:54:37.555398 4871 scope.go:117] "RemoveContainer" containerID="caa2e48a4dc902ee736d46b8875c4cfa81b4a6939a7821fc7a8e1c8d411fe383" Nov 26 05:54:37 crc kubenswrapper[4871]: E1126 05:54:37.555746 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-645b9949f7-48k8g_metallb-system(0f2d5628-2ad3-400c-bc77-b0251683a83a)\"" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" podUID="0f2d5628-2ad3-400c-bc77-b0251683a83a" Nov 26 05:54:39 crc kubenswrapper[4871]: I1126 05:54:39.063457 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="e20fd17b-5b64-4272-9876-347ea057aa04" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 05:54:39 crc kubenswrapper[4871]: I1126 05:54:39.141140 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:54:39 crc kubenswrapper[4871]: I1126 05:54:39.142928 4871 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 26 05:54:39 crc kubenswrapper[4871]: I1126 05:54:39.142994 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 26 05:54:39 crc kubenswrapper[4871]: I1126 05:54:39.370145 4871 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 05:54:39 crc kubenswrapper[4871]: I1126 05:54:39.444742 4871 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="c16a399d-4acf-46bb-a9d4-0159fa7f91e4" Nov 26 05:54:39 crc kubenswrapper[4871]: I1126 05:54:39.446129 4871 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8a2d2c7b-d0d7-40ac-b144-caf1cefe0993" Nov 26 05:54:39 crc kubenswrapper[4871]: I1126 05:54:39.446167 4871 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8a2d2c7b-d0d7-40ac-b144-caf1cefe0993" Nov 26 05:54:39 crc kubenswrapper[4871]: I1126 05:54:39.493597 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:54:39 crc kubenswrapper[4871]: I1126 05:54:39.869719 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt"
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.461057 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt" event={"ID":"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa","Type":"ContainerDied","Data":"4ae7e4881d2a161f01c90b78c961001779906343560baee357348d60e36dbf0b"}
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.461144 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ae7e4881d2a161f01c90b78c961001779906343560baee357348d60e36dbf0b"
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.461099 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt"
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.627064 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-inventory\") pod \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") "
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.627415 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4zm9\" (UniqueName: \"kubernetes.io/projected/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-kube-api-access-s4zm9\") pod \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") "
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.627488 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-ssh-key\") pod \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\" (UID: \"e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa\") "
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.634701 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-kube-api-access-s4zm9" (OuterVolumeSpecName: "kube-api-access-s4zm9") pod "e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" (UID: "e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa"). InnerVolumeSpecName "kube-api-access-s4zm9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.675795 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-inventory" (OuterVolumeSpecName: "inventory") pod "e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" (UID: "e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.677223 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" (UID: "e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.734162 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4zm9\" (UniqueName: \"kubernetes.io/projected/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-kube-api-access-s4zm9\") on node \"crc\" DevicePath \"\""
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.734201 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 05:54:40 crc kubenswrapper[4871]: I1126 05:54:40.734215 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 05:54:41 crc kubenswrapper[4871]: I1126 05:54:41.470386 4871 generic.go:334] "Generic (PLEG): container finished" podID="d78961c7-c9ff-4550-bf75-add0fcef53fe" containerID="3b2f9888499828a8b3269d3242740a4cd2e327e8c488ac4cdf2a16230027d05e" exitCode=1
Nov 26 05:54:41 crc kubenswrapper[4871]: I1126 05:54:41.470455 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" event={"ID":"d78961c7-c9ff-4550-bf75-add0fcef53fe","Type":"ContainerDied","Data":"3b2f9888499828a8b3269d3242740a4cd2e327e8c488ac4cdf2a16230027d05e"}
Nov 26 05:54:41 crc kubenswrapper[4871]: I1126 05:54:41.472316 4871 scope.go:117] "RemoveContainer" containerID="3b2f9888499828a8b3269d3242740a4cd2e327e8c488ac4cdf2a16230027d05e"
Nov 26 05:54:41 crc kubenswrapper[4871]: I1126 05:54:41.474358 4871 generic.go:334] "Generic (PLEG): container finished" podID="6b5541da-9198-4f49-998b-1bfd982089d1" containerID="485b7809110277357899f2ad77f5ee26c11ef81bd52aefaebf5d24da22de60c7" exitCode=1
Nov 26 05:54:41 crc kubenswrapper[4871]: I1126 05:54:41.474418 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" event={"ID":"6b5541da-9198-4f49-998b-1bfd982089d1","Type":"ContainerDied","Data":"485b7809110277357899f2ad77f5ee26c11ef81bd52aefaebf5d24da22de60c7"}
Nov 26 05:54:41 crc kubenswrapper[4871]: I1126 05:54:41.475217 4871 scope.go:117] "RemoveContainer" containerID="485b7809110277357899f2ad77f5ee26c11ef81bd52aefaebf5d24da22de60c7"
Nov 26 05:54:41 crc kubenswrapper[4871]: I1126 05:54:41.535560 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 05:54:41 crc kubenswrapper[4871]: I1126 05:54:41.535925 4871 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8a2d2c7b-d0d7-40ac-b144-caf1cefe0993"
Nov 26 05:54:41 crc kubenswrapper[4871]: I1126 05:54:41.535949 4871 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8a2d2c7b-d0d7-40ac-b144-caf1cefe0993"
Nov 26 05:54:42 crc kubenswrapper[4871]: I1126 05:54:42.486717 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg" event={"ID":"6b5541da-9198-4f49-998b-1bfd982089d1","Type":"ContainerStarted","Data":"2ae193df2962277901c64acea2614755b63ec7f0de6f8dc00d69e5b1a3818e7b"}
Nov 26 05:54:42 crc kubenswrapper[4871]: I1126 05:54:42.487362 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"
Nov 26 05:54:42 crc kubenswrapper[4871]: I1126 05:54:42.489744 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" event={"ID":"d78961c7-c9ff-4550-bf75-add0fcef53fe","Type":"ContainerStarted","Data":"affaa247d5f3590d5ff8bce0fad211c09f1d8819ecf5c8e29eed5f075ab0db1b"}
Nov 26 05:54:42 crc kubenswrapper[4871]: I1126 05:54:42.489931 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px"
Nov 26 05:54:42 crc kubenswrapper[4871]: I1126 05:54:42.584953 4871 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="c16a399d-4acf-46bb-a9d4-0159fa7f91e4"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.541074 4871 generic.go:334] "Generic (PLEG): container finished" podID="ea13fc75-b3f0-48d3-9d86-5262df2957eb" containerID="f7682c83668d74986e88f4af496b13e76c610148c3d3ceeb6b0e80d3fd97ad09" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.541717 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" event={"ID":"ea13fc75-b3f0-48d3-9d86-5262df2957eb","Type":"ContainerDied","Data":"f7682c83668d74986e88f4af496b13e76c610148c3d3ceeb6b0e80d3fd97ad09"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.543286 4871 scope.go:117] "RemoveContainer" containerID="f7682c83668d74986e88f4af496b13e76c610148c3d3ceeb6b0e80d3fd97ad09"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.544862 4871 generic.go:334] "Generic (PLEG): container finished" podID="2c7b5f25-e4ef-4abd-ba84-61b98f194ddd" containerID="c5e2d8d67778c2905e59eb3be4d5f874f0a3fcf1530a721be5cf754554d7bd7e" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.544941 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" event={"ID":"2c7b5f25-e4ef-4abd-ba84-61b98f194ddd","Type":"ContainerDied","Data":"c5e2d8d67778c2905e59eb3be4d5f874f0a3fcf1530a721be5cf754554d7bd7e"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.546136 4871 scope.go:117] "RemoveContainer" containerID="c5e2d8d67778c2905e59eb3be4d5f874f0a3fcf1530a721be5cf754554d7bd7e"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.549244 4871 generic.go:334] "Generic (PLEG): container finished" podID="1b4fb0bb-1050-4bda-acf4-c3efafc79e4a" containerID="d36a40705e9b7284171531fab3a7cac3fc3e28538ef15d4e62de5ed4e3b8d8fd" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.549321 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" event={"ID":"1b4fb0bb-1050-4bda-acf4-c3efafc79e4a","Type":"ContainerDied","Data":"d36a40705e9b7284171531fab3a7cac3fc3e28538ef15d4e62de5ed4e3b8d8fd"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.551125 4871 scope.go:117] "RemoveContainer" containerID="d36a40705e9b7284171531fab3a7cac3fc3e28538ef15d4e62de5ed4e3b8d8fd"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.552589 4871 generic.go:334] "Generic (PLEG): container finished" podID="9253bdc4-d16f-42eb-8704-0965e99dfe47" containerID="05f930210853fa853fe48dd63df7a0baec1e3eccccd99d2ef95036a9d87d5f33" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.552678 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" event={"ID":"9253bdc4-d16f-42eb-8704-0965e99dfe47","Type":"ContainerDied","Data":"05f930210853fa853fe48dd63df7a0baec1e3eccccd99d2ef95036a9d87d5f33"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.553615 4871 scope.go:117] "RemoveContainer" containerID="05f930210853fa853fe48dd63df7a0baec1e3eccccd99d2ef95036a9d87d5f33"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.556833 4871 generic.go:334] "Generic (PLEG): container finished" podID="6ccd73b2-dbfd-4cd6-845c-a61af4f20f96" containerID="204903e2609664bdf3ba8c0292a1dce6d4ca30f7128cf5430e02f6daeee562fb" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.556909 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" event={"ID":"6ccd73b2-dbfd-4cd6-845c-a61af4f20f96","Type":"ContainerDied","Data":"204903e2609664bdf3ba8c0292a1dce6d4ca30f7128cf5430e02f6daeee562fb"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.557775 4871 scope.go:117] "RemoveContainer" containerID="204903e2609664bdf3ba8c0292a1dce6d4ca30f7128cf5430e02f6daeee562fb"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.558836 4871 generic.go:334] "Generic (PLEG): container finished" podID="0b2406e7-8b16-45e1-b726-645d22421af5" containerID="7fe791832885cb9163ead7515fa99cf130c635e5f80e0ae2fe08996fff79a99d" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.558896 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" event={"ID":"0b2406e7-8b16-45e1-b726-645d22421af5","Type":"ContainerDied","Data":"7fe791832885cb9163ead7515fa99cf130c635e5f80e0ae2fe08996fff79a99d"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.559255 4871 scope.go:117] "RemoveContainer" containerID="7fe791832885cb9163ead7515fa99cf130c635e5f80e0ae2fe08996fff79a99d"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.563718 4871 generic.go:334] "Generic (PLEG): container finished" podID="19a75285-dcb7-4f34-b79c-613c96d555de" containerID="23666982ea9d1c13be5b1cf0918bbcf7f5e576b5ef3c678f0a4d7f13cc70fe8c" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.563819 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" event={"ID":"19a75285-dcb7-4f34-b79c-613c96d555de","Type":"ContainerDied","Data":"23666982ea9d1c13be5b1cf0918bbcf7f5e576b5ef3c678f0a4d7f13cc70fe8c"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.564594 4871 scope.go:117] "RemoveContainer" containerID="23666982ea9d1c13be5b1cf0918bbcf7f5e576b5ef3c678f0a4d7f13cc70fe8c"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.568961 4871 generic.go:334] "Generic (PLEG): container finished" podID="06b4e3ae-765b-41c4-9334-4e33c2dc305f" containerID="e77d7460d38ef447f45dff46723b8bac6ac72e9643ab7b192218ee88cae7763e" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.569009 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" event={"ID":"06b4e3ae-765b-41c4-9334-4e33c2dc305f","Type":"ContainerDied","Data":"e77d7460d38ef447f45dff46723b8bac6ac72e9643ab7b192218ee88cae7763e"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.569364 4871 scope.go:117] "RemoveContainer" containerID="e77d7460d38ef447f45dff46723b8bac6ac72e9643ab7b192218ee88cae7763e"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.577818 4871 generic.go:334] "Generic (PLEG): container finished" podID="6d7ff4ed-503b-4184-8633-47598150b7f0" containerID="fabd64274ddc70e6258b0580259ca3c9ad99d6c7e81edaba0f0e641ca6684029" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.577915 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" event={"ID":"6d7ff4ed-503b-4184-8633-47598150b7f0","Type":"ContainerDied","Data":"fabd64274ddc70e6258b0580259ca3c9ad99d6c7e81edaba0f0e641ca6684029"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.578629 4871 scope.go:117] "RemoveContainer" containerID="fabd64274ddc70e6258b0580259ca3c9ad99d6c7e81edaba0f0e641ca6684029"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.583274 4871 generic.go:334] "Generic (PLEG): container finished" podID="4b0778b1-b974-4ce6-bac4-59920ab67dd7" containerID="d9772fd8b68909e3ca0767e5b83edbc3a379833a1b710577a69ebe4c62e4b56f" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.583352 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" event={"ID":"4b0778b1-b974-4ce6-bac4-59920ab67dd7","Type":"ContainerDied","Data":"d9772fd8b68909e3ca0767e5b83edbc3a379833a1b710577a69ebe4c62e4b56f"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.584047 4871 scope.go:117] "RemoveContainer" containerID="d9772fd8b68909e3ca0767e5b83edbc3a379833a1b710577a69ebe4c62e4b56f"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.588384 4871 generic.go:334] "Generic (PLEG): container finished" podID="8c65e9f4-e3de-4bce-851a-f85c1036daa7" containerID="fa5104c42e1d8c3f12c9876844b14e0c381b104561c91233e350583da19f347a" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.588476 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" event={"ID":"8c65e9f4-e3de-4bce-851a-f85c1036daa7","Type":"ContainerDied","Data":"fa5104c42e1d8c3f12c9876844b14e0c381b104561c91233e350583da19f347a"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.589243 4871 scope.go:117] "RemoveContainer" containerID="fa5104c42e1d8c3f12c9876844b14e0c381b104561c91233e350583da19f347a"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.591040 4871 generic.go:334] "Generic (PLEG): container finished" podID="6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c" containerID="52ed078c3899a8e0ea1cd7891c5a68e3843463d688ccbfb103883eace4e0e360" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.591277 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" event={"ID":"6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c","Type":"ContainerDied","Data":"52ed078c3899a8e0ea1cd7891c5a68e3843463d688ccbfb103883eace4e0e360"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.592197 4871 scope.go:117] "RemoveContainer" containerID="52ed078c3899a8e0ea1cd7891c5a68e3843463d688ccbfb103883eace4e0e360"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.597564 4871 generic.go:334] "Generic (PLEG): container finished" podID="8d32351e-c0cc-4c2a-89b2-a79b61cf632e" containerID="49b6c7b8dd8536e2c3295be101ee08b218c4f0fb7f20daa6a4e49204e8d5816d" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.597655 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" event={"ID":"8d32351e-c0cc-4c2a-89b2-a79b61cf632e","Type":"ContainerDied","Data":"49b6c7b8dd8536e2c3295be101ee08b218c4f0fb7f20daa6a4e49204e8d5816d"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.598344 4871 scope.go:117] "RemoveContainer" containerID="49b6c7b8dd8536e2c3295be101ee08b218c4f0fb7f20daa6a4e49204e8d5816d"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.602683 4871 generic.go:334] "Generic (PLEG): container finished" podID="94ce6277-5176-415b-9f4d-847a73c93723" containerID="ac9ba9f70165f33e5d5356665cee826036e01cc5dba695debaad06db9b1520b4" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.602761 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" event={"ID":"94ce6277-5176-415b-9f4d-847a73c93723","Type":"ContainerDied","Data":"ac9ba9f70165f33e5d5356665cee826036e01cc5dba695debaad06db9b1520b4"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.603420 4871 scope.go:117] "RemoveContainer" containerID="ac9ba9f70165f33e5d5356665cee826036e01cc5dba695debaad06db9b1520b4"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.606258 4871 generic.go:334] "Generic (PLEG): container finished" podID="f68377a4-dee0-404b-988a-4f0673466e62" containerID="cd67ed0a097dfab3036663f2143ae697d23ba1e621e97bb593ebb6b539890fb8" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.606340 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" event={"ID":"f68377a4-dee0-404b-988a-4f0673466e62","Type":"ContainerDied","Data":"cd67ed0a097dfab3036663f2143ae697d23ba1e621e97bb593ebb6b539890fb8"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.607001 4871 scope.go:117] "RemoveContainer" containerID="cd67ed0a097dfab3036663f2143ae697d23ba1e621e97bb593ebb6b539890fb8"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.615686 4871 generic.go:334] "Generic (PLEG): container finished" podID="4659b831-32eb-4da2-97f3-f654a299605e" containerID="cbadabae3fcd702f590e75f116e55d5e3cf61fe5ed0dbb95e07144b480bf4ff5" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.615763 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" event={"ID":"4659b831-32eb-4da2-97f3-f654a299605e","Type":"ContainerDied","Data":"cbadabae3fcd702f590e75f116e55d5e3cf61fe5ed0dbb95e07144b480bf4ff5"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.616657 4871 scope.go:117] "RemoveContainer" containerID="cbadabae3fcd702f590e75f116e55d5e3cf61fe5ed0dbb95e07144b480bf4ff5"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.618096 4871 generic.go:334] "Generic (PLEG): container finished" podID="1cc75505-b927-488b-8a16-4fda9a1c2dca" containerID="19029519a721c5302346221f5c769fdd20e20118b672d0b32843135dcb6b5823" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.618166 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" event={"ID":"1cc75505-b927-488b-8a16-4fda9a1c2dca","Type":"ContainerDied","Data":"19029519a721c5302346221f5c769fdd20e20118b672d0b32843135dcb6b5823"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.618572 4871 scope.go:117] "RemoveContainer" containerID="19029519a721c5302346221f5c769fdd20e20118b672d0b32843135dcb6b5823"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.620901 4871 generic.go:334] "Generic (PLEG): container finished" podID="974fe30e-68b5-42bb-9940-a2000ab315f8" containerID="729788fa0846702a61a6484701aa0fb0b413761972b78d47bc32a45d763469e9" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.620947 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" event={"ID":"974fe30e-68b5-42bb-9940-a2000ab315f8","Type":"ContainerDied","Data":"729788fa0846702a61a6484701aa0fb0b413761972b78d47bc32a45d763469e9"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.622249 4871 scope.go:117] "RemoveContainer" containerID="729788fa0846702a61a6484701aa0fb0b413761972b78d47bc32a45d763469e9"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.623518 4871 generic.go:334] "Generic (PLEG): container finished" podID="70168336-54b1-481f-b6a0-d565be07d353" containerID="3088974cdf419f4d04057f0b9810ee6be73ae55ba8a84ff6b8e28058fb4afb0e" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.623589 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" event={"ID":"70168336-54b1-481f-b6a0-d565be07d353","Type":"ContainerDied","Data":"3088974cdf419f4d04057f0b9810ee6be73ae55ba8a84ff6b8e28058fb4afb0e"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.624786 4871 scope.go:117] "RemoveContainer" containerID="3088974cdf419f4d04057f0b9810ee6be73ae55ba8a84ff6b8e28058fb4afb0e"
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.629004 4871 generic.go:334] "Generic (PLEG): container finished" podID="32cd59dd-1a82-4fce-81b1-ebc8f75f1e93" containerID="10c02f89c12d91aa6c8a4af1b3e95d4857df0e1d88a4fff291795f84a734520f" exitCode=1
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.629042 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" event={"ID":"32cd59dd-1a82-4fce-81b1-ebc8f75f1e93","Type":"ContainerDied","Data":"10c02f89c12d91aa6c8a4af1b3e95d4857df0e1d88a4fff291795f84a734520f"}
Nov 26 05:54:44 crc kubenswrapper[4871]: I1126 05:54:44.629788 4871 scope.go:117] "RemoveContainer" containerID="10c02f89c12d91aa6c8a4af1b3e95d4857df0e1d88a4fff291795f84a734520f"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.273480 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.273848 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.286337 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.286442 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.300239 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.300292 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.314712 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.314768 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.320025 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.320093 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.345227 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.345289 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.373144 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.373219 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.526210 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.527000 4871 scope.go:117] "RemoveContainer" containerID="53e982c8e34d28ea9e2fa9a75c1f02dbb078d4325b8653ab41ffd5e5af9f25be"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.591848 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.591893 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.629273 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.629769 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.647585 4871 generic.go:334] "Generic (PLEG): container finished" podID="19a75285-dcb7-4f34-b79c-613c96d555de" containerID="58cfb7064120bb23c71a25a1edfbe171fff9d3ce52b7b9bce0dcb2113560a8d1" exitCode=1
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.647668 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" event={"ID":"19a75285-dcb7-4f34-b79c-613c96d555de","Type":"ContainerDied","Data":"58cfb7064120bb23c71a25a1edfbe171fff9d3ce52b7b9bce0dcb2113560a8d1"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.647712 4871 scope.go:117] "RemoveContainer" containerID="23666982ea9d1c13be5b1cf0918bbcf7f5e576b5ef3c678f0a4d7f13cc70fe8c"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.648677 4871 scope.go:117] "RemoveContainer" containerID="58cfb7064120bb23c71a25a1edfbe171fff9d3ce52b7b9bce0dcb2113560a8d1"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.648850 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.648898 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.648936 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-6lpnj_openstack-operators(19a75285-dcb7-4f34-b79c-613c96d555de)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" podUID="19a75285-dcb7-4f34-b79c-613c96d555de"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.652624 4871 generic.go:334] "Generic (PLEG): container finished" podID="06b4e3ae-765b-41c4-9334-4e33c2dc305f" containerID="d6be69d1ebb14f1d555e49c61ea1fe237f68102cb74b6ab0db7cc05c9ce463e6" exitCode=1
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.652698 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" event={"ID":"06b4e3ae-765b-41c4-9334-4e33c2dc305f","Type":"ContainerDied","Data":"d6be69d1ebb14f1d555e49c61ea1fe237f68102cb74b6ab0db7cc05c9ce463e6"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.653193 4871 scope.go:117] "RemoveContainer" containerID="d6be69d1ebb14f1d555e49c61ea1fe237f68102cb74b6ab0db7cc05c9ce463e6"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.653476 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-x5hqw_openstack-operators(06b4e3ae-765b-41c4-9334-4e33c2dc305f)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" podUID="06b4e3ae-765b-41c4-9334-4e33c2dc305f"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.657514 4871 generic.go:334] "Generic (PLEG): container finished" podID="ea13fc75-b3f0-48d3-9d86-5262df2957eb" containerID="9b9f167ae078dc80d024412e540d6a6a879a16cca9bb6a2b52e78e92d4ba7bc0" exitCode=1
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.657631 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" event={"ID":"ea13fc75-b3f0-48d3-9d86-5262df2957eb","Type":"ContainerDied","Data":"9b9f167ae078dc80d024412e540d6a6a879a16cca9bb6a2b52e78e92d4ba7bc0"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.658217 4871 scope.go:117] "RemoveContainer" containerID="9b9f167ae078dc80d024412e540d6a6a879a16cca9bb6a2b52e78e92d4ba7bc0"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.658465 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-wmwwk_openstack-operators(ea13fc75-b3f0-48d3-9d86-5262df2957eb)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" podUID="ea13fc75-b3f0-48d3-9d86-5262df2957eb"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.665545 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.665588 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.675822 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.677034 4871 scope.go:117] "RemoveContainer" containerID="d5abb2da6d5d4dbb1d73ea36dde21af7e95ad0c3292da57bbc0125a7eb754783"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.681474 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.686089 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" event={"ID":"0b2406e7-8b16-45e1-b726-645d22421af5","Type":"ContainerStarted","Data":"d476f3b75a10f7ec53e2324e30cfc01a24ae903827a2e155550f04a8026e951d"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.691893 4871 generic.go:334] "Generic (PLEG): container finished" podID="974fe30e-68b5-42bb-9940-a2000ab315f8" containerID="2cfcc0bf07b99a341a22e1a6e653e6c5663fbd6eeb5366a3072212f713e93375" exitCode=1
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.691957 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" event={"ID":"974fe30e-68b5-42bb-9940-a2000ab315f8","Type":"ContainerDied","Data":"2cfcc0bf07b99a341a22e1a6e653e6c5663fbd6eeb5366a3072212f713e93375"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.692407 4871 scope.go:117] "RemoveContainer" containerID="2cfcc0bf07b99a341a22e1a6e653e6c5663fbd6eeb5366a3072212f713e93375"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.692655 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-6kccm_openstack-operators(974fe30e-68b5-42bb-9940-a2000ab315f8)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" podUID="974fe30e-68b5-42bb-9940-a2000ab315f8"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.698055 4871 generic.go:334] "Generic (PLEG): container finished" podID="6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c" containerID="a9b994af78dbf219dda52f4dac268189619d8e9c193679afdf985c97101b404f" exitCode=1
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.698122 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" event={"ID":"6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c","Type":"ContainerDied","Data":"a9b994af78dbf219dda52f4dac268189619d8e9c193679afdf985c97101b404f"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.698965 4871 scope.go:117] "RemoveContainer" containerID="a9b994af78dbf219dda52f4dac268189619d8e9c193679afdf985c97101b404f"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.699283 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-jvztg_openstack-operators(6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" podUID="6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.700153 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" event={"ID":"70168336-54b1-481f-b6a0-d565be07d353","Type":"ContainerStarted","Data":"804979701f046e843b38eb1d96df3b676c7b5875ca0fc0cd135a1ea1d3a271b4"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.700692 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.703320 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" event={"ID":"94ce6277-5176-415b-9f4d-847a73c93723","Type":"ContainerStarted","Data":"8136fb1a07b7db07a40248a6e25afcabfc782d8202c8880ff4ec160ec4936936"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.703820 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.705235 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" event={"ID":"1cc75505-b927-488b-8a16-4fda9a1c2dca","Type":"ContainerStarted","Data":"7a08aa908047d89602c6884e9a07c225e9ef97a697bc50b69575e894fa7a5e44"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.705387 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.708614 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.708669 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.710866 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" event={"ID":"32cd59dd-1a82-4fce-81b1-ebc8f75f1e93","Type":"ContainerStarted","Data":"f15dd1970a46e60fe57ee0f6a44c3b8ca5989904386ea15498f1a8a4f5539699"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.711410 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.717440 4871 generic.go:334] "Generic (PLEG): container finished" podID="6ccd73b2-dbfd-4cd6-845c-a61af4f20f96" containerID="6ee456f70d5060a5ddf0985f51451ea0a8cf11c655fcc122b957a3c640b70298" exitCode=1
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.717511 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" event={"ID":"6ccd73b2-dbfd-4cd6-845c-a61af4f20f96","Type":"ContainerDied","Data":"6ee456f70d5060a5ddf0985f51451ea0a8cf11c655fcc122b957a3c640b70298"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.718539 4871 scope.go:117] "RemoveContainer" containerID="6ee456f70d5060a5ddf0985f51451ea0a8cf11c655fcc122b957a3c640b70298"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.718794 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-shgb6_openstack-operators(6ccd73b2-dbfd-4cd6-845c-a61af4f20f96)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" podUID="6ccd73b2-dbfd-4cd6-845c-a61af4f20f96"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.722141 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" event={"ID":"9253bdc4-d16f-42eb-8704-0965e99dfe47","Type":"ContainerStarted","Data":"e6ce7f632cb150e31becd63927b1a2150ffbc2b29849ad40ef42db2d2ba52804"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.722753 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.735157 4871 generic.go:334] "Generic (PLEG): container finished" podID="2c7b5f25-e4ef-4abd-ba84-61b98f194ddd" containerID="b1651a013485aa1e98c052ade5bfde7694f5f95c7265ed1ac1c33b7d9230f034" exitCode=1
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.735230 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" event={"ID":"2c7b5f25-e4ef-4abd-ba84-61b98f194ddd","Type":"ContainerDied","Data":"b1651a013485aa1e98c052ade5bfde7694f5f95c7265ed1ac1c33b7d9230f034"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.735958 4871 scope.go:117] "RemoveContainer" containerID="b1651a013485aa1e98c052ade5bfde7694f5f95c7265ed1ac1c33b7d9230f034"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.736263 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-9xghq_openstack-operators(2c7b5f25-e4ef-4abd-ba84-61b98f194ddd)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" podUID="2c7b5f25-e4ef-4abd-ba84-61b98f194ddd"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.754366 4871 generic.go:334] "Generic (PLEG): container finished" podID="4b0778b1-b974-4ce6-bac4-59920ab67dd7" containerID="54b8071bf3fc326e75b73bc1fdb21c03db57040b9ab9e4feed567fa35bb72290" exitCode=1
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.754428 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" event={"ID":"4b0778b1-b974-4ce6-bac4-59920ab67dd7","Type":"ContainerDied","Data":"54b8071bf3fc326e75b73bc1fdb21c03db57040b9ab9e4feed567fa35bb72290"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.755185 4871 scope.go:117] "RemoveContainer" containerID="54b8071bf3fc326e75b73bc1fdb21c03db57040b9ab9e4feed567fa35bb72290"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.755488 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-skx5k_openstack-operators(4b0778b1-b974-4ce6-bac4-59920ab67dd7)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" podUID="4b0778b1-b974-4ce6-bac4-59920ab67dd7"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.760918 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" event={"ID":"1b4fb0bb-1050-4bda-acf4-c3efafc79e4a","Type":"ContainerStarted","Data":"381a9526b5493923a15d6c1ae9ba2b6f1ccde0197e93524945197aab9bb08b53"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.761145 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.765384 4871 generic.go:334] "Generic (PLEG): container finished" podID="8c65e9f4-e3de-4bce-851a-f85c1036daa7" containerID="1b4913dabb35fda193e0f05e91aab5c70dd4685198c87a3da4cca8ab6a266ac2" exitCode=1
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.765457 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" event={"ID":"8c65e9f4-e3de-4bce-851a-f85c1036daa7","Type":"ContainerDied","Data":"1b4913dabb35fda193e0f05e91aab5c70dd4685198c87a3da4cca8ab6a266ac2"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.766007 4871 scope.go:117] "RemoveContainer" containerID="1b4913dabb35fda193e0f05e91aab5c70dd4685198c87a3da4cca8ab6a266ac2"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.766261 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-bdpn4_openstack-operators(8c65e9f4-e3de-4bce-851a-f85c1036daa7)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" podUID="8c65e9f4-e3de-4bce-851a-f85c1036daa7"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.768192 4871 generic.go:334] "Generic (PLEG): container finished" podID="6d7ff4ed-503b-4184-8633-47598150b7f0" containerID="c03c3af92258a06129d108fd426c762d6405d0d37cd51332da502f43cc6b4d63" exitCode=1
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.768262 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" event={"ID":"6d7ff4ed-503b-4184-8633-47598150b7f0","Type":"ContainerDied","Data":"c03c3af92258a06129d108fd426c762d6405d0d37cd51332da502f43cc6b4d63"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.768729 4871 scope.go:117] "RemoveContainer" containerID="c03c3af92258a06129d108fd426c762d6405d0d37cd51332da502f43cc6b4d63"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.768940 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-56868586f6-2v8hx_openstack-operators(6d7ff4ed-503b-4184-8633-47598150b7f0)\"" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" podUID="6d7ff4ed-503b-4184-8633-47598150b7f0"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.774061 4871 generic.go:334] "Generic (PLEG): container finished" podID="8d32351e-c0cc-4c2a-89b2-a79b61cf632e" containerID="e43847e9233ad2f61c1671e9e9f336861270e81a6887e3b7be510be1c662344e" exitCode=1
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.774094 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" event={"ID":"8d32351e-c0cc-4c2a-89b2-a79b61cf632e","Type":"ContainerDied","Data":"e43847e9233ad2f61c1671e9e9f336861270e81a6887e3b7be510be1c662344e"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.775020 4871 scope.go:117] "RemoveContainer" containerID="e43847e9233ad2f61c1671e9e9f336861270e81a6887e3b7be510be1c662344e"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.775467 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-v95x7_openstack-operators(8d32351e-c0cc-4c2a-89b2-a79b61cf632e)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" podUID="8d32351e-c0cc-4c2a-89b2-a79b61cf632e"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.778822 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" event={"ID":"f68377a4-dee0-404b-988a-4f0673466e62","Type":"ContainerStarted","Data":"d3e843dfb2b36b2984f6143425353086c025df5ad8c3aaf090607e269c3214ec"}
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.779471 4871 scope.go:117] "RemoveContainer" containerID="d3e843dfb2b36b2984f6143425353086c025df5ad8c3aaf090607e269c3214ec"
Nov 26 05:54:45 crc kubenswrapper[4871]: E1126 05:54:45.779892 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rlr55_openstack-operators(f68377a4-dee0-404b-988a-4f0673466e62)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" podUID="f68377a4-dee0-404b-988a-4f0673466e62"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.808891 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.810998 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.811121 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.874991 4871 scope.go:117] "RemoveContainer" containerID="e77d7460d38ef447f45dff46723b8bac6ac72e9643ab7b192218ee88cae7763e"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.894788 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.894828 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm"
Nov 26 05:54:45 crc kubenswrapper[4871]: I1126 05:54:45.985712 4871 scope.go:117] "RemoveContainer" containerID="f7682c83668d74986e88f4af496b13e76c610148c3d3ceeb6b0e80d3fd97ad09"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.022885 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-ds5gp"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.045693 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.045748 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.072264 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.081140 4871 scope.go:117] "RemoveContainer" containerID="729788fa0846702a61a6484701aa0fb0b413761972b78d47bc32a45d763469e9"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.109513 4871 scope.go:117] "RemoveContainer" containerID="52ed078c3899a8e0ea1cd7891c5a68e3843463d688ccbfb103883eace4e0e360"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.133240 4871 scope.go:117] "RemoveContainer" containerID="204903e2609664bdf3ba8c0292a1dce6d4ca30f7128cf5430e02f6daeee562fb"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.177052 4871 scope.go:117] "RemoveContainer" containerID="c5e2d8d67778c2905e59eb3be4d5f874f0a3fcf1530a721be5cf754554d7bd7e"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.223852 4871 scope.go:117] "RemoveContainer" containerID="d9772fd8b68909e3ca0767e5b83edbc3a379833a1b710577a69ebe4c62e4b56f"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.263779 4871 scope.go:117] "RemoveContainer" containerID="fa5104c42e1d8c3f12c9876844b14e0c381b104561c91233e350583da19f347a"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.298077 4871 scope.go:117] "RemoveContainer" containerID="fabd64274ddc70e6258b0580259ca3c9ad99d6c7e81edaba0f0e641ca6684029"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.336045 4871 scope.go:117] "RemoveContainer" containerID="49b6c7b8dd8536e2c3295be101ee08b218c4f0fb7f20daa6a4e49204e8d5816d"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.791590 4871 generic.go:334] "Generic (PLEG): container finished" podID="0b2406e7-8b16-45e1-b726-645d22421af5" containerID="d476f3b75a10f7ec53e2324e30cfc01a24ae903827a2e155550f04a8026e951d" exitCode=1
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.791686 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" event={"ID":"0b2406e7-8b16-45e1-b726-645d22421af5","Type":"ContainerDied","Data":"d476f3b75a10f7ec53e2324e30cfc01a24ae903827a2e155550f04a8026e951d"}
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.792007 4871 scope.go:117] "RemoveContainer" containerID="7fe791832885cb9163ead7515fa99cf130c635e5f80e0ae2fe08996fff79a99d"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.792638 4871 scope.go:117] "RemoveContainer" containerID="d476f3b75a10f7ec53e2324e30cfc01a24ae903827a2e155550f04a8026e951d"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.793066 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-6c6pc_openstack-operators(0b2406e7-8b16-45e1-b726-645d22421af5)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" podUID="0b2406e7-8b16-45e1-b726-645d22421af5"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.803014 4871 scope.go:117] "RemoveContainer" containerID="2cfcc0bf07b99a341a22e1a6e653e6c5663fbd6eeb5366a3072212f713e93375"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.803680 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-6kccm_openstack-operators(974fe30e-68b5-42bb-9940-a2000ab315f8)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" podUID="974fe30e-68b5-42bb-9940-a2000ab315f8"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.816120 4871 scope.go:117] "RemoveContainer" containerID="e43847e9233ad2f61c1671e9e9f336861270e81a6887e3b7be510be1c662344e"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.816491 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-v95x7_openstack-operators(8d32351e-c0cc-4c2a-89b2-a79b61cf632e)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" podUID="8d32351e-c0cc-4c2a-89b2-a79b61cf632e"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.824813 4871 generic.go:334] "Generic (PLEG): container finished" podID="51410db5-d309-4625-8f36-02cf8f0ba419" containerID="8e5c88c9818a4ab1833f0c6d78b0c3022927db845bb1c6ef9e37848443b4c8d3" exitCode=1
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.824943 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" event={"ID":"51410db5-d309-4625-8f36-02cf8f0ba419","Type":"ContainerDied","Data":"8e5c88c9818a4ab1833f0c6d78b0c3022927db845bb1c6ef9e37848443b4c8d3"}
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.826044 4871 scope.go:117] "RemoveContainer" containerID="8e5c88c9818a4ab1833f0c6d78b0c3022927db845bb1c6ef9e37848443b4c8d3"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.826587 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-lzsqj_openstack-operators(51410db5-d309-4625-8f36-02cf8f0ba419)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" podUID="51410db5-d309-4625-8f36-02cf8f0ba419"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.832708 4871 generic.go:334] "Generic (PLEG): container finished" podID="4659b831-32eb-4da2-97f3-f654a299605e" containerID="e928a5d01b0df2a151431739d78e64fb7bf7a8623907e43210121abf106b50f5" exitCode=1
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.832941 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" event={"ID":"4659b831-32eb-4da2-97f3-f654a299605e","Type":"ContainerDied","Data":"e928a5d01b0df2a151431739d78e64fb7bf7a8623907e43210121abf106b50f5"}
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.834044 4871 scope.go:117] "RemoveContainer" containerID="e928a5d01b0df2a151431739d78e64fb7bf7a8623907e43210121abf106b50f5"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.839326 4871 scope.go:117] "RemoveContainer" containerID="6ee456f70d5060a5ddf0985f51451ea0a8cf11c655fcc122b957a3c640b70298"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.839522 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-clm5v_openstack-operators(4659b831-32eb-4da2-97f3-f654a299605e)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" podUID="4659b831-32eb-4da2-97f3-f654a299605e"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.840286 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-shgb6_openstack-operators(6ccd73b2-dbfd-4cd6-845c-a61af4f20f96)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" podUID="6ccd73b2-dbfd-4cd6-845c-a61af4f20f96"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.840450 4871 scope.go:117] "RemoveContainer" containerID="a9b994af78dbf219dda52f4dac268189619d8e9c193679afdf985c97101b404f"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.841358 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-jvztg_openstack-operators(6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" podUID="6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.861507 4871 scope.go:117] "RemoveContainer" containerID="9b9f167ae078dc80d024412e540d6a6a879a16cca9bb6a2b52e78e92d4ba7bc0"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.862014 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-wmwwk_openstack-operators(ea13fc75-b3f0-48d3-9d86-5262df2957eb)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" podUID="ea13fc75-b3f0-48d3-9d86-5262df2957eb"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.865817 4871 scope.go:117] "RemoveContainer" containerID="b1651a013485aa1e98c052ade5bfde7694f5f95c7265ed1ac1c33b7d9230f034"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.866088 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-9xghq_openstack-operators(2c7b5f25-e4ef-4abd-ba84-61b98f194ddd)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" podUID="2c7b5f25-e4ef-4abd-ba84-61b98f194ddd"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.871130 4871 scope.go:117] "RemoveContainer" containerID="54b8071bf3fc326e75b73bc1fdb21c03db57040b9ab9e4feed567fa35bb72290"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.871467 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-skx5k_openstack-operators(4b0778b1-b974-4ce6-bac4-59920ab67dd7)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" podUID="4b0778b1-b974-4ce6-bac4-59920ab67dd7"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.874181 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" event={"ID":"33ba2b4e-6239-43c0-a694-6495b7ae2ba3","Type":"ContainerDied","Data":"98b213ed28ee131a605d80664fee98e4f538d59be7fadc670296bfda45dc6c00"}
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.874058 4871 generic.go:334] "Generic (PLEG): container finished" podID="33ba2b4e-6239-43c0-a694-6495b7ae2ba3" containerID="98b213ed28ee131a605d80664fee98e4f538d59be7fadc670296bfda45dc6c00" exitCode=1
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.874949 4871 scope.go:117] "RemoveContainer" containerID="98b213ed28ee131a605d80664fee98e4f538d59be7fadc670296bfda45dc6c00"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.875282 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-9lvtk_openstack-operators(33ba2b4e-6239-43c0-a694-6495b7ae2ba3)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" podUID="33ba2b4e-6239-43c0-a694-6495b7ae2ba3"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.879660 4871 scope.go:117] "RemoveContainer" containerID="1b4913dabb35fda193e0f05e91aab5c70dd4685198c87a3da4cca8ab6a266ac2"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.879932 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-bdpn4_openstack-operators(8c65e9f4-e3de-4bce-851a-f85c1036daa7)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" podUID="8c65e9f4-e3de-4bce-851a-f85c1036daa7"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.881659 4871 scope.go:117] "RemoveContainer" containerID="53e982c8e34d28ea9e2fa9a75c1f02dbb078d4325b8653ab41ffd5e5af9f25be"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.882542 4871 generic.go:334] "Generic (PLEG): container finished" podID="70168336-54b1-481f-b6a0-d565be07d353" containerID="804979701f046e843b38eb1d96df3b676c7b5875ca0fc0cd135a1ea1d3a271b4" exitCode=1
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.882598 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" event={"ID":"70168336-54b1-481f-b6a0-d565be07d353","Type":"ContainerDied","Data":"804979701f046e843b38eb1d96df3b676c7b5875ca0fc0cd135a1ea1d3a271b4"}
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.883564 4871 scope.go:117] "RemoveContainer" containerID="804979701f046e843b38eb1d96df3b676c7b5875ca0fc0cd135a1ea1d3a271b4"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.883805 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-tsz49_openstack-operators(70168336-54b1-481f-b6a0-d565be07d353)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" podUID="70168336-54b1-481f-b6a0-d565be07d353"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.888635 4871 generic.go:334] "Generic (PLEG): container finished" podID="f68377a4-dee0-404b-988a-4f0673466e62" containerID="d3e843dfb2b36b2984f6143425353086c025df5ad8c3aaf090607e269c3214ec" exitCode=1
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.888690 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" event={"ID":"f68377a4-dee0-404b-988a-4f0673466e62","Type":"ContainerDied","Data":"d3e843dfb2b36b2984f6143425353086c025df5ad8c3aaf090607e269c3214ec"}
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.889221 4871 scope.go:117] "RemoveContainer" containerID="d3e843dfb2b36b2984f6143425353086c025df5ad8c3aaf090607e269c3214ec"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.889414 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rlr55_openstack-operators(f68377a4-dee0-404b-988a-4f0673466e62)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" podUID="f68377a4-dee0-404b-988a-4f0673466e62"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.899893 4871 generic.go:334] "Generic (PLEG): container finished" podID="9253bdc4-d16f-42eb-8704-0965e99dfe47" containerID="e6ce7f632cb150e31becd63927b1a2150ffbc2b29849ad40ef42db2d2ba52804" exitCode=1
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.900496 4871 scope.go:117] "RemoveContainer" containerID="e6ce7f632cb150e31becd63927b1a2150ffbc2b29849ad40ef42db2d2ba52804"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.900780 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-5kslm_openstack-operators(9253bdc4-d16f-42eb-8704-0965e99dfe47)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" podUID="9253bdc4-d16f-42eb-8704-0965e99dfe47"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.900964 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" event={"ID":"9253bdc4-d16f-42eb-8704-0965e99dfe47","Type":"ContainerDied","Data":"e6ce7f632cb150e31becd63927b1a2150ffbc2b29849ad40ef42db2d2ba52804"}
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.906033 4871 scope.go:117] "RemoveContainer" containerID="58cfb7064120bb23c71a25a1edfbe171fff9d3ce52b7b9bce0dcb2113560a8d1"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.906344 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-6lpnj_openstack-operators(19a75285-dcb7-4f34-b79c-613c96d555de)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" podUID="19a75285-dcb7-4f34-b79c-613c96d555de"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.910753 4871 generic.go:334] "Generic (PLEG): container finished" podID="1cc75505-b927-488b-8a16-4fda9a1c2dca" containerID="7a08aa908047d89602c6884e9a07c225e9ef97a697bc50b69575e894fa7a5e44" exitCode=1
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.910806 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" event={"ID":"1cc75505-b927-488b-8a16-4fda9a1c2dca","Type":"ContainerDied","Data":"7a08aa908047d89602c6884e9a07c225e9ef97a697bc50b69575e894fa7a5e44"}
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.911391 4871 scope.go:117] "RemoveContainer" containerID="7a08aa908047d89602c6884e9a07c225e9ef97a697bc50b69575e894fa7a5e44"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.911630 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-dxbwn_openstack-operators(1cc75505-b927-488b-8a16-4fda9a1c2dca)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" podUID="1cc75505-b927-488b-8a16-4fda9a1c2dca"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.917729 4871 generic.go:334] "Generic (PLEG): container finished" podID="32cd59dd-1a82-4fce-81b1-ebc8f75f1e93" containerID="f15dd1970a46e60fe57ee0f6a44c3b8ca5989904386ea15498f1a8a4f5539699" exitCode=1
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.917799 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" event={"ID":"32cd59dd-1a82-4fce-81b1-ebc8f75f1e93","Type":"ContainerDied","Data":"f15dd1970a46e60fe57ee0f6a44c3b8ca5989904386ea15498f1a8a4f5539699"}
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.918440 4871 scope.go:117] "RemoveContainer" containerID="f15dd1970a46e60fe57ee0f6a44c3b8ca5989904386ea15498f1a8a4f5539699"
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.918827 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-4gvxx_openstack-operators(32cd59dd-1a82-4fce-81b1-ebc8f75f1e93)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" podUID="32cd59dd-1a82-4fce-81b1-ebc8f75f1e93"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.920448 4871 generic.go:334] "Generic (PLEG): container finished" podID="94ce6277-5176-415b-9f4d-847a73c93723" containerID="8136fb1a07b7db07a40248a6e25afcabfc782d8202c8880ff4ec160ec4936936" exitCode=1
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.921101 4871 scope.go:117] "RemoveContainer" containerID="8136fb1a07b7db07a40248a6e25afcabfc782d8202c8880ff4ec160ec4936936"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.921129 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" event={"ID":"94ce6277-5176-415b-9f4d-847a73c93723","Type":"ContainerDied","Data":"8136fb1a07b7db07a40248a6e25afcabfc782d8202c8880ff4ec160ec4936936"}
Nov 26 05:54:46 crc kubenswrapper[4871]: E1126 05:54:46.921374 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-czv5j_openstack-operators(94ce6277-5176-415b-9f4d-847a73c93723)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" podUID="94ce6277-5176-415b-9f4d-847a73c93723"
Nov 26 05:54:46 crc kubenswrapper[4871]: I1126 05:54:46.947552 4871 scope.go:117] "RemoveContainer" containerID="cbadabae3fcd702f590e75f116e55d5e3cf61fe5ed0dbb95e07144b480bf4ff5"
Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.007393 4871 scope.go:117] "RemoveContainer" containerID="d5abb2da6d5d4dbb1d73ea36dde21af7e95ad0c3292da57bbc0125a7eb754783"
Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.062283 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.072147 4871 scope.go:117] "RemoveContainer" containerID="3088974cdf419f4d04057f0b9810ee6be73ae55ba8a84ff6b8e28058fb4afb0e"
Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.105154 4871 scope.go:117] "RemoveContainer" containerID="cd67ed0a097dfab3036663f2143ae697d23ba1e621e97bb593ebb6b539890fb8"
Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.126162 4871 scope.go:117] "RemoveContainer" containerID="05f930210853fa853fe48dd63df7a0baec1e3eccccd99d2ef95036a9d87d5f33"
Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.153647 4871 scope.go:117] "RemoveContainer" containerID="19029519a721c5302346221f5c769fdd20e20118b672d0b32843135dcb6b5823"
Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.176708 4871 scope.go:117] "RemoveContainer" containerID="10c02f89c12d91aa6c8a4af1b3e95d4857df0e1d88a4fff291795f84a734520f"
Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.201077 4871 scope.go:117] "RemoveContainer" containerID="ac9ba9f70165f33e5d5356665cee826036e01cc5dba695debaad06db9b1520b4"
Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.522107 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg"
Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.941413 4871 scope.go:117] "RemoveContainer" containerID="e928a5d01b0df2a151431739d78e64fb7bf7a8623907e43210121abf106b50f5"
Nov 26 05:54:47 crc kubenswrapper[4871]: E1126 05:54:47.941955 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-clm5v_openstack-operators(4659b831-32eb-4da2-97f3-f654a299605e)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" podUID="4659b831-32eb-4da2-97f3-f654a299605e"
Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.955521 4871 scope.go:117] "RemoveContainer" containerID="804979701f046e843b38eb1d96df3b676c7b5875ca0fc0cd135a1ea1d3a271b4"
Nov 26 05:54:47 crc kubenswrapper[4871]: E1126 05:54:47.956133 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting
failed container=manager pod=designate-operator-controller-manager-955677c94-tsz49_openstack-operators(70168336-54b1-481f-b6a0-d565be07d353)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" podUID="70168336-54b1-481f-b6a0-d565be07d353" Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.963043 4871 scope.go:117] "RemoveContainer" containerID="f15dd1970a46e60fe57ee0f6a44c3b8ca5989904386ea15498f1a8a4f5539699" Nov 26 05:54:47 crc kubenswrapper[4871]: E1126 05:54:47.963608 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-4gvxx_openstack-operators(32cd59dd-1a82-4fce-81b1-ebc8f75f1e93)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" podUID="32cd59dd-1a82-4fce-81b1-ebc8f75f1e93" Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.969339 4871 scope.go:117] "RemoveContainer" containerID="8136fb1a07b7db07a40248a6e25afcabfc782d8202c8880ff4ec160ec4936936" Nov 26 05:54:47 crc kubenswrapper[4871]: E1126 05:54:47.970183 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-czv5j_openstack-operators(94ce6277-5176-415b-9f4d-847a73c93723)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" podUID="94ce6277-5176-415b-9f4d-847a73c93723" Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.981373 4871 scope.go:117] "RemoveContainer" containerID="d3e843dfb2b36b2984f6143425353086c025df5ad8c3aaf090607e269c3214ec" Nov 26 05:54:47 crc kubenswrapper[4871]: E1126 05:54:47.982369 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rlr55_openstack-operators(f68377a4-dee0-404b-988a-4f0673466e62)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" podUID="f68377a4-dee0-404b-988a-4f0673466e62" Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.982740 4871 scope.go:117] "RemoveContainer" containerID="e6ce7f632cb150e31becd63927b1a2150ffbc2b29849ad40ef42db2d2ba52804" Nov 26 05:54:47 crc kubenswrapper[4871]: E1126 05:54:47.983269 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-5kslm_openstack-operators(9253bdc4-d16f-42eb-8704-0965e99dfe47)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" podUID="9253bdc4-d16f-42eb-8704-0965e99dfe47" Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.985175 4871 scope.go:117] "RemoveContainer" containerID="7a08aa908047d89602c6884e9a07c225e9ef97a697bc50b69575e894fa7a5e44" Nov 26 05:54:47 crc kubenswrapper[4871]: E1126 05:54:47.985687 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-dxbwn_openstack-operators(1cc75505-b927-488b-8a16-4fda9a1c2dca)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" 
podUID="1cc75505-b927-488b-8a16-4fda9a1c2dca" Nov 26 05:54:47 crc kubenswrapper[4871]: I1126 05:54:47.988262 4871 scope.go:117] "RemoveContainer" containerID="54b8071bf3fc326e75b73bc1fdb21c03db57040b9ab9e4feed567fa35bb72290" Nov 26 05:54:47 crc kubenswrapper[4871]: E1126 05:54:47.988828 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-skx5k_openstack-operators(4b0778b1-b974-4ce6-bac4-59920ab67dd7)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" podUID="4b0778b1-b974-4ce6-bac4-59920ab67dd7" Nov 26 05:54:49 crc kubenswrapper[4871]: I1126 05:54:49.000675 4871 scope.go:117] "RemoveContainer" containerID="e6ce7f632cb150e31becd63927b1a2150ffbc2b29849ad40ef42db2d2ba52804" Nov 26 05:54:49 crc kubenswrapper[4871]: E1126 05:54:49.001153 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-5kslm_openstack-operators(9253bdc4-d16f-42eb-8704-0965e99dfe47)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" podUID="9253bdc4-d16f-42eb-8704-0965e99dfe47" Nov 26 05:54:49 crc kubenswrapper[4871]: I1126 05:54:49.066228 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="e20fd17b-5b64-4272-9876-347ea057aa04" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 26 05:54:49 crc kubenswrapper[4871]: I1126 05:54:49.066353 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Nov 26 05:54:49 crc kubenswrapper[4871]: I1126 05:54:49.067701 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-state-metrics" containerStatusID={"Type":"cri-o","ID":"7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9"} pod="openstack/kube-state-metrics-0" containerMessage="Container kube-state-metrics failed liveness probe, will be restarted" Nov 26 05:54:49 crc kubenswrapper[4871]: I1126 05:54:49.067788 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="e20fd17b-5b64-4272-9876-347ea057aa04" containerName="kube-state-metrics" containerID="cri-o://7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9" gracePeriod=30 Nov 26 05:54:49 crc kubenswrapper[4871]: I1126 05:54:49.148434 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:54:49 crc kubenswrapper[4871]: I1126 05:54:49.154733 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 05:54:49 crc kubenswrapper[4871]: I1126 05:54:49.888437 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-mph7b" Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.016367 4871 generic.go:334] "Generic (PLEG): container finished" podID="e20fd17b-5b64-4272-9876-347ea057aa04" containerID="7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9" exitCode=2 Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.016407 4871 generic.go:334] "Generic (PLEG): container 
finished" podID="e20fd17b-5b64-4272-9876-347ea057aa04" containerID="ac86455e6df8e377979a2f17ebf3ce1802242452e3dd253695d5a751e7a6affb" exitCode=1 Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.016465 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e20fd17b-5b64-4272-9876-347ea057aa04","Type":"ContainerDied","Data":"7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9"} Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.016577 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e20fd17b-5b64-4272-9876-347ea057aa04","Type":"ContainerDied","Data":"ac86455e6df8e377979a2f17ebf3ce1802242452e3dd253695d5a751e7a6affb"} Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.016604 4871 scope.go:117] "RemoveContainer" containerID="7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9" Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.017313 4871 scope.go:117] "RemoveContainer" containerID="ac86455e6df8e377979a2f17ebf3ce1802242452e3dd253695d5a751e7a6affb" Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.065695 4871 scope.go:117] "RemoveContainer" containerID="7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9" Nov 26 05:54:50 crc kubenswrapper[4871]: E1126 05:54:50.066239 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9\": container with ID starting with 7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9 not found: ID does not exist" containerID="7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9" Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.066289 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9"} err="failed to get container status \"7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9\": rpc error: code = NotFound desc = could not find container \"7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9\": container with ID starting with 7ccf9074a14c68d82f7d4ade08497eca035b4d9ff429c2547ba37643744bbae9 not found: ID does not exist" Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.157126 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.172478 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.275057 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.507330 4871 scope.go:117] "RemoveContainer" containerID="caa2e48a4dc902ee736d46b8875c4cfa81b4a6939a7821fc7a8e1c8d411fe383" Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.509233 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:54:50 crc kubenswrapper[4871]: E1126 05:54:50.509901 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:54:50 crc kubenswrapper[4871]: I1126 05:54:50.959360 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.009344 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.029475 4871 generic.go:334] "Generic (PLEG): container finished" podID="e20fd17b-5b64-4272-9876-347ea057aa04" containerID="65a47e92f235132ead33a245dde9c9bc40c3865f3032253f4754a4ad44d0946f" exitCode=1 Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.029563 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e20fd17b-5b64-4272-9876-347ea057aa04","Type":"ContainerDied","Data":"65a47e92f235132ead33a245dde9c9bc40c3865f3032253f4754a4ad44d0946f"} Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.029595 4871 scope.go:117] "RemoveContainer" containerID="ac86455e6df8e377979a2f17ebf3ce1802242452e3dd253695d5a751e7a6affb" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.030494 4871 scope.go:117] "RemoveContainer" containerID="65a47e92f235132ead33a245dde9c9bc40c3865f3032253f4754a4ad44d0946f" Nov 26 05:54:51 crc kubenswrapper[4871]: E1126 05:54:51.031032 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(e20fd17b-5b64-4272-9876-347ea057aa04)\"" pod="openstack/kube-state-metrics-0" podUID="e20fd17b-5b64-4272-9876-347ea057aa04" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.032425 4871 generic.go:334] "Generic (PLEG): container finished" podID="0f2d5628-2ad3-400c-bc77-b0251683a83a" containerID="5628d1980e13841fb35a22f86783a40da3176541f20ca09b1c02f7c5a1e1f7de" exitCode=1 Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.032474 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" event={"ID":"0f2d5628-2ad3-400c-bc77-b0251683a83a","Type":"ContainerDied","Data":"5628d1980e13841fb35a22f86783a40da3176541f20ca09b1c02f7c5a1e1f7de"} Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.033493 4871 scope.go:117] "RemoveContainer" containerID="5628d1980e13841fb35a22f86783a40da3176541f20ca09b1c02f7c5a1e1f7de" Nov 26 05:54:51 crc kubenswrapper[4871]: E1126 05:54:51.033905 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-645b9949f7-48k8g_metallb-system(0f2d5628-2ad3-400c-bc77-b0251683a83a)\"" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" podUID="0f2d5628-2ad3-400c-bc77-b0251683a83a" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.115339 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.116790 4871 scope.go:117] "RemoveContainer" 
containerID="d6be69d1ebb14f1d555e49c61ea1fe237f68102cb74b6ab0db7cc05c9ce463e6" Nov 26 05:54:51 crc kubenswrapper[4871]: E1126 05:54:51.117303 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-x5hqw_openstack-operators(06b4e3ae-765b-41c4-9334-4e33c2dc305f)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" podUID="06b4e3ae-765b-41c4-9334-4e33c2dc305f" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.138398 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-bjbh8" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.150152 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.155607 4871 scope.go:117] "RemoveContainer" containerID="caa2e48a4dc902ee736d46b8875c4cfa81b4a6939a7821fc7a8e1c8d411fe383" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.180759 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.232980 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.360484 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.362672 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.419264 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.426342 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-sw6fb" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.437089 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.660810 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.683848 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.685200 4871 scope.go:117] "RemoveContainer" containerID="c03c3af92258a06129d108fd426c762d6405d0d37cd51332da502f43cc6b4d63" Nov 26 05:54:51 crc kubenswrapper[4871]: E1126 05:54:51.685688 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-56868586f6-2v8hx_openstack-operators(6d7ff4ed-503b-4184-8633-47598150b7f0)\"" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" podUID="6d7ff4ed-503b-4184-8633-47598150b7f0" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.870919 4871 reflector.go:368] 
Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 26 05:54:51 crc kubenswrapper[4871]: I1126 05:54:51.874035 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 26 05:54:52 crc kubenswrapper[4871]: I1126 05:54:52.050846 4871 scope.go:117] "RemoveContainer" containerID="65a47e92f235132ead33a245dde9c9bc40c3865f3032253f4754a4ad44d0946f" Nov 26 05:54:52 crc kubenswrapper[4871]: E1126 05:54:52.051347 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(e20fd17b-5b64-4272-9876-347ea057aa04)\"" pod="openstack/kube-state-metrics-0" podUID="e20fd17b-5b64-4272-9876-347ea057aa04" Nov 26 05:54:52 crc kubenswrapper[4871]: I1126 05:54:52.171036 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 26 05:54:52 crc kubenswrapper[4871]: I1126 05:54:52.299307 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 05:54:52 crc kubenswrapper[4871]: I1126 05:54:52.327499 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 05:54:52 crc kubenswrapper[4871]: I1126 05:54:52.378093 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 26 05:54:52 crc kubenswrapper[4871]: I1126 05:54:52.409624 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 05:54:52 crc kubenswrapper[4871]: I1126 05:54:52.445673 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 26 05:54:52 crc kubenswrapper[4871]: I1126 05:54:52.757775 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-cpdzr" Nov 26 05:54:52 crc kubenswrapper[4871]: I1126 05:54:52.797061 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-p7dxk" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.017828 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.108724 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.217359 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.252126 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.323473 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.403655 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.454277 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 26 05:54:53 
crc kubenswrapper[4871]: I1126 05:54:53.473337 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.500026 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-hswlh" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.523017 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.580230 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.586489 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.623775 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.831851 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.834009 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.834145 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-plugins-conf" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.834742 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.919667 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 26 05:54:53 crc kubenswrapper[4871]: I1126 05:54:53.961275 4871 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-9m6k8" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.008125 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.056599 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.088467 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.093673 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.170706 4871 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.268273 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.372162 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" 
Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.376559 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.381057 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.406682 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.478210 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.503885 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.510899 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.612945 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.624706 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.648513 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.649382 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.684297 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.739175 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.746689 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.768071 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.852623 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.876739 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.889556 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-notifications-svc" Nov 26 05:54:54 crc kubenswrapper[4871]: I1126 05:54:54.956311 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.003974 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.024091 4871 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.071661 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.092683 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.097059 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.152965 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.162157 4871 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-2m8lv" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.221170 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-rhkvj" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.221494 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.235206 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.241487 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.255056 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5675dd9766-bp9px" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.272269 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-ll4ht" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.273142 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.274992 4871 scope.go:117] "RemoveContainer" containerID="1b4913dabb35fda193e0f05e91aab5c70dd4685198c87a3da4cca8ab6a266ac2" Nov 26 05:54:55 crc kubenswrapper[4871]: E1126 05:54:55.275672 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-bdpn4_openstack-operators(8c65e9f4-e3de-4bce-851a-f85c1036daa7)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" podUID="8c65e9f4-e3de-4bce-851a-f85c1036daa7" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.286849 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.287880 4871 scope.go:117] "RemoveContainer" containerID="9b9f167ae078dc80d024412e540d6a6a879a16cca9bb6a2b52e78e92d4ba7bc0" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.372417 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.373225 4871 scope.go:117] "RemoveContainer" containerID="e928a5d01b0df2a151431739d78e64fb7bf7a8623907e43210121abf106b50f5" Nov 26 05:54:55 crc kubenswrapper[4871]: E1126 05:54:55.373552 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-clm5v_openstack-operators(4659b831-32eb-4da2-97f3-f654a299605e)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" podUID="4659b831-32eb-4da2-97f3-f654a299605e" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.527028 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.528186 4871 scope.go:117] "RemoveContainer" containerID="8e5c88c9818a4ab1833f0c6d78b0c3022927db845bb1c6ef9e37848443b4c8d3" Nov 26 05:54:55 crc kubenswrapper[4871]: E1126 05:54:55.528667 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-lzsqj_openstack-operators(51410db5-d309-4625-8f36-02cf8f0ba419)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" podUID="51410db5-d309-4625-8f36-02cf8f0ba419" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.536959 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.563696 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.583508 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-xlclw" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.591981 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.593344 4871 scope.go:117] "RemoveContainer" containerID="a9b994af78dbf219dda52f4dac268189619d8e9c193679afdf985c97101b404f" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.629094 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.630158 4871 scope.go:117] "RemoveContainer" containerID="d3e843dfb2b36b2984f6143425353086c025df5ad8c3aaf090607e269c3214ec" Nov 26 05:54:55 crc kubenswrapper[4871]: E1126 05:54:55.630716 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rlr55_openstack-operators(f68377a4-dee0-404b-988a-4f0673466e62)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" podUID="f68377a4-dee0-404b-988a-4f0673466e62" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.649346 4871 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.650098 4871 scope.go:117] "RemoveContainer" containerID="b1651a013485aa1e98c052ade5bfde7694f5f95c7265ed1ac1c33b7d9230f034" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.665785 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.674681 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.675770 4871 scope.go:117] "RemoveContainer" containerID="98b213ed28ee131a605d80664fee98e4f538d59be7fadc670296bfda45dc6c00" Nov 26 05:54:55 crc kubenswrapper[4871]: E1126 05:54:55.676226 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-9lvtk_openstack-operators(33ba2b4e-6239-43c0-a694-6495b7ae2ba3)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" podUID="33ba2b4e-6239-43c0-a694-6495b7ae2ba3" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.694170 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.709040 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.710184 4871 scope.go:117] "RemoveContainer" containerID="6ee456f70d5060a5ddf0985f51451ea0a8cf11c655fcc122b957a3c640b70298" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.755783 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.777183 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.817478 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.827606 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-lhlqv" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.858450 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.871983 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.902318 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-jj87z" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.924956 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.936858 4871 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.941735 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-8qh9s" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.942914 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.980500 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-pzwkq" Nov 26 05:54:55 crc kubenswrapper[4871]: I1126 05:54:55.986749 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.018008 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.072906 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.075509 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8qdzr" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.096751 4871 generic.go:334] "Generic (PLEG): container finished" podID="ea13fc75-b3f0-48d3-9d86-5262df2957eb" containerID="afaa8c9eda53548ab394745c9263b386194cd8dae3a7ff681171d9886ad0de4f" exitCode=1 Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.096859 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" event={"ID":"ea13fc75-b3f0-48d3-9d86-5262df2957eb","Type":"ContainerDied","Data":"afaa8c9eda53548ab394745c9263b386194cd8dae3a7ff681171d9886ad0de4f"} Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.096906 4871 scope.go:117] "RemoveContainer" containerID="9b9f167ae078dc80d024412e540d6a6a879a16cca9bb6a2b52e78e92d4ba7bc0" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.097824 4871 scope.go:117] "RemoveContainer" containerID="afaa8c9eda53548ab394745c9263b386194cd8dae3a7ff681171d9886ad0de4f" Nov 26 05:54:56 crc kubenswrapper[4871]: E1126 05:54:56.098261 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-wmwwk_openstack-operators(ea13fc75-b3f0-48d3-9d86-5262df2957eb)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" podUID="ea13fc75-b3f0-48d3-9d86-5262df2957eb" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.102152 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-79qlw" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.105851 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" event={"ID":"2c7b5f25-e4ef-4abd-ba84-61b98f194ddd","Type":"ContainerStarted","Data":"5c5b1632678267f5cf169975e9c5be40531f8af2f11068fc8ce7f74fc914b6d5"} Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.106061 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.108741 4871 generic.go:334] "Generic (PLEG): container finished" podID="6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c" containerID="d4793c6d4e0ccf18be306f9d76a620f15aa14ca31ce8683e83949ca8484ad021" exitCode=1 Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.108799 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" event={"ID":"6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c","Type":"ContainerDied","Data":"d4793c6d4e0ccf18be306f9d76a620f15aa14ca31ce8683e83949ca8484ad021"} Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.110214 4871 scope.go:117] "RemoveContainer" containerID="d4793c6d4e0ccf18be306f9d76a620f15aa14ca31ce8683e83949ca8484ad021" Nov 26 05:54:56 crc kubenswrapper[4871]: E1126 05:54:56.112942 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-jvztg_openstack-operators(6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" podUID="6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.118494 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.122637 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" event={"ID":"6ccd73b2-dbfd-4cd6-845c-a61af4f20f96","Type":"ContainerStarted","Data":"73d05c5d7838ca23109676f65c3dfb9cdb7dd0059e28e646adbf471e17639eda"} Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.124045 4871 scope.go:117] "RemoveContainer" containerID="73d05c5d7838ca23109676f65c3dfb9cdb7dd0059e28e646adbf471e17639eda" Nov 26 05:54:56 crc kubenswrapper[4871]: E1126 05:54:56.124643 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-shgb6_openstack-operators(6ccd73b2-dbfd-4cd6-845c-a61af4f20f96)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" podUID="6ccd73b2-dbfd-4cd6-845c-a61af4f20f96" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.134836 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.137733 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.148712 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.154140 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.187444 4871 scope.go:117] "RemoveContainer" containerID="a9b994af78dbf219dda52f4dac268189619d8e9c193679afdf985c97101b404f" Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.212913 4871 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.234841 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.247083 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.257733 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.310760 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.355367 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.391424 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-dj9cw"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.415976 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.423285 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.437593 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.461179 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.471816 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.475372 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.541220 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.561861 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.589183 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.630995 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.661469 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.665260 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.675971 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.682934 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-kxnxk"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.714047 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.743866 4871 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.743891 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-d2mkt"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.777562 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.786261 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.787183 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-jtj7s"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.807225 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.810859 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.837480 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.847734 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.910848 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.919067 4871 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.929214 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-876rq"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.961303 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-5dkq4"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.964771 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 26 05:54:56 crc kubenswrapper[4871]: I1126 05:54:56.993488 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.005757 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-shhw2"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.054968 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.076216 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.079348 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.136386 4871 generic.go:334] "Generic (PLEG): container finished" podID="6ccd73b2-dbfd-4cd6-845c-a61af4f20f96" containerID="73d05c5d7838ca23109676f65c3dfb9cdb7dd0059e28e646adbf471e17639eda" exitCode=1
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.136491 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" event={"ID":"6ccd73b2-dbfd-4cd6-845c-a61af4f20f96","Type":"ContainerDied","Data":"73d05c5d7838ca23109676f65c3dfb9cdb7dd0059e28e646adbf471e17639eda"}
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.136593 4871 scope.go:117] "RemoveContainer" containerID="6ee456f70d5060a5ddf0985f51451ea0a8cf11c655fcc122b957a3c640b70298"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.137377 4871 scope.go:117] "RemoveContainer" containerID="73d05c5d7838ca23109676f65c3dfb9cdb7dd0059e28e646adbf471e17639eda"
Nov 26 05:54:57 crc kubenswrapper[4871]: E1126 05:54:57.137699 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-shgb6_openstack-operators(6ccd73b2-dbfd-4cd6-845c-a61af4f20f96)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" podUID="6ccd73b2-dbfd-4cd6-845c-a61af4f20f96"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.145721 4871 generic.go:334] "Generic (PLEG): container finished" podID="2c7b5f25-e4ef-4abd-ba84-61b98f194ddd" containerID="5c5b1632678267f5cf169975e9c5be40531f8af2f11068fc8ce7f74fc914b6d5" exitCode=1
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.145780 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" event={"ID":"2c7b5f25-e4ef-4abd-ba84-61b98f194ddd","Type":"ContainerDied","Data":"5c5b1632678267f5cf169975e9c5be40531f8af2f11068fc8ce7f74fc914b6d5"}
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.146474 4871 scope.go:117] "RemoveContainer" containerID="5c5b1632678267f5cf169975e9c5be40531f8af2f11068fc8ce7f74fc914b6d5"
Nov 26 05:54:57 crc kubenswrapper[4871]: E1126 05:54:57.146841 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-9xghq_openstack-operators(2c7b5f25-e4ef-4abd-ba84-61b98f194ddd)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" podUID="2c7b5f25-e4ef-4abd-ba84-61b98f194ddd"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.148483 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.176012 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.223823 4871 scope.go:117] "RemoveContainer" containerID="b1651a013485aa1e98c052ade5bfde7694f5f95c7265ed1ac1c33b7d9230f034"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.242623 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.265788 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.280826 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.282728 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.316955 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.317410 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.326278 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-server-conf"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.332307 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.360920 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.397963 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.439729 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.491707 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.543258 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-8t7pb"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.555155 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.556118 4871 scope.go:117] "RemoveContainer" containerID="5628d1980e13841fb35a22f86783a40da3176541f20ca09b1c02f7c5a1e1f7de"
Nov 26 05:54:57 crc kubenswrapper[4871]: E1126 05:54:57.556518 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-645b9949f7-48k8g_metallb-system(0f2d5628-2ad3-400c-bc77-b0251683a83a)\"" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" podUID="0f2d5628-2ad3-400c-bc77-b0251683a83a"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.581189 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-6xdcj"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.599815 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.653068 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.656673 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-mzrhf"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.668423 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.705256 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.706498 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.713353 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.718337 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.752216 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.757929 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.804982 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.821146 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.828710 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.881080 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.885795 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.890130 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.919269 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.935238 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.939277 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.986287 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 26 05:54:57 crc kubenswrapper[4871]: I1126 05:54:57.989275 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.030952 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.057377 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.081392 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.099413 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.106453 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.161004 4871 scope.go:117] "RemoveContainer" containerID="5c5b1632678267f5cf169975e9c5be40531f8af2f11068fc8ce7f74fc914b6d5"
Nov 26 05:54:58 crc kubenswrapper[4871]: E1126 05:54:58.161343 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-9xghq_openstack-operators(2c7b5f25-e4ef-4abd-ba84-61b98f194ddd)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" podUID="2c7b5f25-e4ef-4abd-ba84-61b98f194ddd"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.170362 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.170362 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.187455 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.188708 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.220973 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-2wncc"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.231173 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.233604 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.250505 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.284772 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-7br6l"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.336553 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.362850 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.411538 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.458230 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-ngt4p"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.507549 4871 scope.go:117] "RemoveContainer" containerID="d476f3b75a10f7ec53e2324e30cfc01a24ae903827a2e155550f04a8026e951d"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.508146 4871 scope.go:117] "RemoveContainer" containerID="e43847e9233ad2f61c1671e9e9f336861270e81a6887e3b7be510be1c662344e"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.508252 4871 scope.go:117] "RemoveContainer" containerID="2cfcc0bf07b99a341a22e1a6e653e6c5663fbd6eeb5366a3072212f713e93375"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.508800 4871 scope.go:117] "RemoveContainer" containerID="8136fb1a07b7db07a40248a6e25afcabfc782d8202c8880ff4ec160ec4936936"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.540954 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.572203 4871 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.586483 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.586597 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.591136 4871 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.593961 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.604802 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-x6sgw"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.608176 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=19.608129988 podStartE2EDuration="19.608129988s" podCreationTimestamp="2025-11-26 05:54:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 05:54:58.603758929 +0000 UTC m=+1756.786810515" watchObservedRunningTime="2025-11-26 05:54:58.608129988 +0000 UTC m=+1756.791181584"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.647636 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.670036 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.685405 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.687304 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.706912 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.707243 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.717774 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.757173 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.790004 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-qb6hl"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.866126 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.938320 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc"
Nov 26 05:54:58 crc kubenswrapper[4871]: I1126 05:54:58.960280 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.007441 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.046720 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.056990 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.057696 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.057859 4871 scope.go:117] "RemoveContainer" containerID="65a47e92f235132ead33a245dde9c9bc40c3865f3032253f4754a4ad44d0946f"
Nov 26 05:54:59 crc kubenswrapper[4871]: E1126 05:54:59.058368 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(e20fd17b-5b64-4272-9876-347ea057aa04)\"" pod="openstack/kube-state-metrics-0" podUID="e20fd17b-5b64-4272-9876-347ea057aa04"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.064977 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-8ngqz"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.111277 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.127677 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.139476 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.161653 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.179823 4871 generic.go:334] "Generic (PLEG): container finished" podID="974fe30e-68b5-42bb-9940-a2000ab315f8" containerID="6ca5a382c8f0ffda69d2f26145501cba6b19ef3c51811c5f2f2fa44ffbcc1085" exitCode=1
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.179899 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" event={"ID":"974fe30e-68b5-42bb-9940-a2000ab315f8","Type":"ContainerDied","Data":"6ca5a382c8f0ffda69d2f26145501cba6b19ef3c51811c5f2f2fa44ffbcc1085"}
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.179934 4871 scope.go:117] "RemoveContainer" containerID="2cfcc0bf07b99a341a22e1a6e653e6c5663fbd6eeb5366a3072212f713e93375"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.180729 4871 scope.go:117] "RemoveContainer" containerID="6ca5a382c8f0ffda69d2f26145501cba6b19ef3c51811c5f2f2fa44ffbcc1085"
Nov 26 05:54:59 crc kubenswrapper[4871]: E1126 05:54:59.181059 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-6kccm_openstack-operators(974fe30e-68b5-42bb-9940-a2000ab315f8)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" podUID="974fe30e-68b5-42bb-9940-a2000ab315f8"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.183177 4871 generic.go:334] "Generic (PLEG): container finished" podID="8d32351e-c0cc-4c2a-89b2-a79b61cf632e" containerID="2d2b2ac177577a6e0227c3c13857f6590d3a3ca1684922d91074097cd521178d" exitCode=1
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.183250 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" event={"ID":"8d32351e-c0cc-4c2a-89b2-a79b61cf632e","Type":"ContainerDied","Data":"2d2b2ac177577a6e0227c3c13857f6590d3a3ca1684922d91074097cd521178d"}
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.183986 4871 scope.go:117] "RemoveContainer" containerID="2d2b2ac177577a6e0227c3c13857f6590d3a3ca1684922d91074097cd521178d"
Nov 26 05:54:59 crc kubenswrapper[4871]: E1126 05:54:59.184329 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-v95x7_openstack-operators(8d32351e-c0cc-4c2a-89b2-a79b61cf632e)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" podUID="8d32351e-c0cc-4c2a-89b2-a79b61cf632e"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.184469 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.185942 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.188557 4871 generic.go:334] "Generic (PLEG): container finished" podID="94ce6277-5176-415b-9f4d-847a73c93723" containerID="93382560a6bf53626fc30bdfd2b06a3a9aeb151f19eacb371b010cdbfd1d10a7" exitCode=1
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.188634 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" event={"ID":"94ce6277-5176-415b-9f4d-847a73c93723","Type":"ContainerDied","Data":"93382560a6bf53626fc30bdfd2b06a3a9aeb151f19eacb371b010cdbfd1d10a7"}
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.189394 4871 scope.go:117] "RemoveContainer" containerID="93382560a6bf53626fc30bdfd2b06a3a9aeb151f19eacb371b010cdbfd1d10a7"
Nov 26 05:54:59 crc kubenswrapper[4871]: E1126 05:54:59.189680 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-czv5j_openstack-operators(94ce6277-5176-415b-9f4d-847a73c93723)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" podUID="94ce6277-5176-415b-9f4d-847a73c93723"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.210469 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" event={"ID":"0b2406e7-8b16-45e1-b726-645d22421af5","Type":"ContainerStarted","Data":"d50ffa6bc84bd8f313e67110c71958eac1da1e8b20a960d11d6ae8b002c18e67"}
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.211143 4871 scope.go:117] "RemoveContainer" containerID="65a47e92f235132ead33a245dde9c9bc40c3865f3032253f4754a4ad44d0946f"
Nov 26 05:54:59 crc kubenswrapper[4871]: E1126 05:54:59.211367 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(e20fd17b-5b64-4272-9876-347ea057aa04)\"" pod="openstack/kube-state-metrics-0" podUID="e20fd17b-5b64-4272-9876-347ea057aa04"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.216933 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.236093 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.237129 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.245850 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.250607 4871 scope.go:117] "RemoveContainer" containerID="e43847e9233ad2f61c1671e9e9f336861270e81a6887e3b7be510be1c662344e"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.250948 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-w7bjw"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.269173 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.276977 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.325160 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.357277 4871 scope.go:117] "RemoveContainer" containerID="8136fb1a07b7db07a40248a6e25afcabfc782d8202c8880ff4ec160ec4936936"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.361183 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.413133 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.456625 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.469665 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.507812 4871 scope.go:117] "RemoveContainer" containerID="f15dd1970a46e60fe57ee0f6a44c3b8ca5989904386ea15498f1a8a4f5539699"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.546768 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.554316 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.576351 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.598146 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.609835 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-h8mks"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.626856 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.639974 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.643196 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.659818 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.679923 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.684857 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.699490 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.728733 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.750047 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.754950 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.776401 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-xhm7h"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.836599 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.846323 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-4knx6"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.946335 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 26 05:54:59 crc kubenswrapper[4871]: I1126 05:54:59.946714 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.038730 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.075857 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.081376 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.099571 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.104014 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.105807 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.109099 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.185052 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.238001 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-g4dzt"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.238912 4871 generic.go:334] "Generic (PLEG): container finished" podID="32cd59dd-1a82-4fce-81b1-ebc8f75f1e93" containerID="dc8c88089b910af6abac5748429b7c27d8da141ce6b2c5bc7074c4d969095fd8" exitCode=1
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.238999 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" event={"ID":"32cd59dd-1a82-4fce-81b1-ebc8f75f1e93","Type":"ContainerDied","Data":"dc8c88089b910af6abac5748429b7c27d8da141ce6b2c5bc7074c4d969095fd8"}
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.239045 4871 scope.go:117] "RemoveContainer" containerID="f15dd1970a46e60fe57ee0f6a44c3b8ca5989904386ea15498f1a8a4f5539699"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.240087 4871 scope.go:117] "RemoveContainer" containerID="dc8c88089b910af6abac5748429b7c27d8da141ce6b2c5bc7074c4d969095fd8"
Nov 26 05:55:00 crc kubenswrapper[4871]: E1126 05:55:00.240802 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-4gvxx_openstack-operators(32cd59dd-1a82-4fce-81b1-ebc8f75f1e93)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" podUID="32cd59dd-1a82-4fce-81b1-ebc8f75f1e93"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.257256 4871 generic.go:334] "Generic (PLEG): container finished" podID="0b2406e7-8b16-45e1-b726-645d22421af5" containerID="d50ffa6bc84bd8f313e67110c71958eac1da1e8b20a960d11d6ae8b002c18e67" exitCode=1
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.257383 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" event={"ID":"0b2406e7-8b16-45e1-b726-645d22421af5","Type":"ContainerDied","Data":"d50ffa6bc84bd8f313e67110c71958eac1da1e8b20a960d11d6ae8b002c18e67"}
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.258522 4871 scope.go:117] "RemoveContainer" containerID="d50ffa6bc84bd8f313e67110c71958eac1da1e8b20a960d11d6ae8b002c18e67"
Nov 26 05:55:00 crc kubenswrapper[4871]: E1126 05:55:00.259190 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-6c6pc_openstack-operators(0b2406e7-8b16-45e1-b726-645d22421af5)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" podUID="0b2406e7-8b16-45e1-b726-645d22421af5"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.299994 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.349179 4871 scope.go:117] "RemoveContainer" containerID="d476f3b75a10f7ec53e2324e30cfc01a24ae903827a2e155550f04a8026e951d"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.399305 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.481628 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-4k6kn"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.506887 4871 scope.go:117] "RemoveContainer" containerID="54b8071bf3fc326e75b73bc1fdb21c03db57040b9ab9e4feed567fa35bb72290"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.644633 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.661651 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.703429 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.720602 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.762778 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.770694 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.852735 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.877013 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.905611 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.921340 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.957727 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.975127 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 26 05:55:00 crc kubenswrapper[4871]: I1126 05:55:00.981255 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.020007 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-nr8gb"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.091901 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-j5phm"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.114796 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.115638 4871 scope.go:117] "RemoveContainer" containerID="d6be69d1ebb14f1d555e49c61ea1fe237f68102cb74b6ab0db7cc05c9ce463e6"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.194870 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.221006 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-erlang-cookie"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.254261 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.266655 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.273998 4871 generic.go:334] "Generic (PLEG): container finished" podID="4b0778b1-b974-4ce6-bac4-59920ab67dd7" containerID="65298b8f443e5f96e69187f67c045c0abd1ea806e49c09cfebbed3edd6629779" exitCode=1
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.274033 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" event={"ID":"4b0778b1-b974-4ce6-bac4-59920ab67dd7","Type":"ContainerDied","Data":"65298b8f443e5f96e69187f67c045c0abd1ea806e49c09cfebbed3edd6629779"}
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.274060 4871 scope.go:117] "RemoveContainer" containerID="54b8071bf3fc326e75b73bc1fdb21c03db57040b9ab9e4feed567fa35bb72290"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.274694 4871 scope.go:117] "RemoveContainer" containerID="65298b8f443e5f96e69187f67c045c0abd1ea806e49c09cfebbed3edd6629779"
Nov 26 05:55:01 crc kubenswrapper[4871]: E1126 05:55:01.274935 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-skx5k_openstack-operators(4b0778b1-b974-4ce6-bac4-59920ab67dd7)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" podUID="4b0778b1-b974-4ce6-bac4-59920ab67dd7"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.293828 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.302376 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.375464 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.386751 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.409784 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.482167 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.495137 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.507249 4871 scope.go:117] "RemoveContainer" containerID="804979701f046e843b38eb1d96df3b676c7b5875ca0fc0cd135a1ea1d3a271b4"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.507908 4871 scope.go:117] "RemoveContainer" containerID="58cfb7064120bb23c71a25a1edfbe171fff9d3ce52b7b9bce0dcb2113560a8d1"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.537725 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.546010 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.589428 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.591785 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.592566 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.600876 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.612101 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.641696 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.659199 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.684084 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.685325 4871 scope.go:117] "RemoveContainer" containerID="c03c3af92258a06129d108fd426c762d6405d0d37cd51332da502f43cc6b4d63"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.697225 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.774477 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.795508 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-wgkrk"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.841586 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.847169 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.925567 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-tgrk2"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.925916 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.933248 4871 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.933447 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://e556fb7a67a2ab3468f23c5b9e78689aa947b6b257e9076049c6bc73006a10b1" gracePeriod=5
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.946938 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.957676 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 26 05:55:01 crc kubenswrapper[4871]: I1126 05:55:01.962842 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.007984 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.045650 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.081210 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.164854 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.184485 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.254124 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.291937 4871 generic.go:334] "Generic (PLEG): container finished" podID="70168336-54b1-481f-b6a0-d565be07d353" containerID="016f7c2c54f212f8cb1c8a80a867adc428a4f3c47a107fd15b56791db3a40233" exitCode=1
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.291985 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" event={"ID":"70168336-54b1-481f-b6a0-d565be07d353","Type":"ContainerDied","Data":"016f7c2c54f212f8cb1c8a80a867adc428a4f3c47a107fd15b56791db3a40233"}
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.292078 4871 scope.go:117] "RemoveContainer" containerID="804979701f046e843b38eb1d96df3b676c7b5875ca0fc0cd135a1ea1d3a271b4"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.292774 4871 scope.go:117] "RemoveContainer" containerID="016f7c2c54f212f8cb1c8a80a867adc428a4f3c47a107fd15b56791db3a40233"
Nov 26 05:55:02 crc kubenswrapper[4871]: E1126 05:55:02.293107 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-tsz49_openstack-operators(70168336-54b1-481f-b6a0-d565be07d353)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" podUID="70168336-54b1-481f-b6a0-d565be07d353"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.295892 4871 generic.go:334] "Generic (PLEG): container finished" podID="19a75285-dcb7-4f34-b79c-613c96d555de" containerID="4ab714acd2ecba5f59cfb4fc0025d012b4661c776f9d5ba94add92812581edd7" exitCode=1
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.295955 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" event={"ID":"19a75285-dcb7-4f34-b79c-613c96d555de","Type":"ContainerDied","Data":"4ab714acd2ecba5f59cfb4fc0025d012b4661c776f9d5ba94add92812581edd7"}
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.296334 4871 scope.go:117] "RemoveContainer" containerID="4ab714acd2ecba5f59cfb4fc0025d012b4661c776f9d5ba94add92812581edd7"
Nov 26 05:55:02 crc kubenswrapper[4871]: E1126 05:55:02.296568 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-6lpnj_openstack-operators(19a75285-dcb7-4f34-b79c-613c96d555de)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" podUID="19a75285-dcb7-4f34-b79c-613c96d555de"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.308098 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-dst6m"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.313032 4871 generic.go:334] "Generic (PLEG): container finished" podID="06b4e3ae-765b-41c4-9334-4e33c2dc305f" containerID="be9349e0a50564a2f48e1d280550e525f57c5579e87284e9ef90455ce48764d5" exitCode=1
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.313098 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" event={"ID":"06b4e3ae-765b-41c4-9334-4e33c2dc305f","Type":"ContainerDied","Data":"be9349e0a50564a2f48e1d280550e525f57c5579e87284e9ef90455ce48764d5"}
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.313734 4871 scope.go:117] "RemoveContainer" containerID="be9349e0a50564a2f48e1d280550e525f57c5579e87284e9ef90455ce48764d5"
Nov 26 05:55:02 crc kubenswrapper[4871]: E1126 05:55:02.313972 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-x5hqw_openstack-operators(06b4e3ae-765b-41c4-9334-4e33c2dc305f)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" podUID="06b4e3ae-765b-41c4-9334-4e33c2dc305f"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.326885 4871 generic.go:334] "Generic (PLEG): container finished" podID="6d7ff4ed-503b-4184-8633-47598150b7f0" containerID="a5d69025752f2e5a3195cc36deaa38d3372fb32e4cb749ef2b5d47cda8ec7baf" exitCode=1
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.326936 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" event={"ID":"6d7ff4ed-503b-4184-8633-47598150b7f0","Type":"ContainerDied","Data":"a5d69025752f2e5a3195cc36deaa38d3372fb32e4cb749ef2b5d47cda8ec7baf"}
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.327676 4871 scope.go:117] "RemoveContainer" containerID="a5d69025752f2e5a3195cc36deaa38d3372fb32e4cb749ef2b5d47cda8ec7baf"
Nov 26 05:55:02 crc kubenswrapper[4871]: E1126 05:55:02.327952 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-56868586f6-2v8hx_openstack-operators(6d7ff4ed-503b-4184-8633-47598150b7f0)\"" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" podUID="6d7ff4ed-503b-4184-8633-47598150b7f0"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.343416 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.350485 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.361760 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.385016 4871 scope.go:117] "RemoveContainer" containerID="58cfb7064120bb23c71a25a1edfbe171fff9d3ce52b7b9bce0dcb2113560a8d1"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.436585 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.446901 4871 scope.go:117] "RemoveContainer" containerID="d6be69d1ebb14f1d555e49c61ea1fe237f68102cb74b6ab0db7cc05c9ce463e6"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.456818 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.468897 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.471773 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.496356 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-6chhg"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.513781 4871 scope.go:117] "RemoveContainer" containerID="7a08aa908047d89602c6884e9a07c225e9ef97a697bc50b69575e894fa7a5e44"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.513879 4871 scope.go:117] "RemoveContainer" containerID="e6ce7f632cb150e31becd63927b1a2150ffbc2b29849ad40ef42db2d2ba52804"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.537426 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.539784 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-4jdrz"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.573765 4871 scope.go:117] "RemoveContainer" containerID="c03c3af92258a06129d108fd426c762d6405d0d37cd51332da502f43cc6b4d63"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.615708 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-lv2x2"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.661686 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.662358 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.707852 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.712840 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.727248 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.772979 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.799206 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.867416 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.885838 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.935993 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Nov 26 05:55:02 crc kubenswrapper[4871]: I1126 05:55:02.946667 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.022090 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.042468 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.123470 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.165746 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.170986 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-l479r"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.192078 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-mqgn6"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.238577 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.290203 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.343953 4871 generic.go:334] "Generic (PLEG): container finished" podID="9253bdc4-d16f-42eb-8704-0965e99dfe47" containerID="74d492fe0ccfa70ad5822b436273b941175b4b6cda63803b1ca86453bea3cbdb" exitCode=1
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.344015 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" event={"ID":"9253bdc4-d16f-42eb-8704-0965e99dfe47","Type":"ContainerDied","Data":"74d492fe0ccfa70ad5822b436273b941175b4b6cda63803b1ca86453bea3cbdb"}
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.344051 4871 scope.go:117] "RemoveContainer" containerID="e6ce7f632cb150e31becd63927b1a2150ffbc2b29849ad40ef42db2d2ba52804"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.344919 4871 scope.go:117] "RemoveContainer" containerID="74d492fe0ccfa70ad5822b436273b941175b4b6cda63803b1ca86453bea3cbdb"
Nov 26 05:55:03 crc kubenswrapper[4871]: E1126 05:55:03.345296 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-5kslm_openstack-operators(9253bdc4-d16f-42eb-8704-0965e99dfe47)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" podUID="9253bdc4-d16f-42eb-8704-0965e99dfe47"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.348950 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.349872 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-notifications-config-data"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.366263 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.371738 4871 generic.go:334] "Generic (PLEG): container finished" podID="1cc75505-b927-488b-8a16-4fda9a1c2dca" containerID="5307cbbd3c0384c43c4b7f1411ac79c0581466bca67d59c5ab3e8920e1c80b9f" exitCode=1
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.371815 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" event={"ID":"1cc75505-b927-488b-8a16-4fda9a1c2dca","Type":"ContainerDied","Data":"5307cbbd3c0384c43c4b7f1411ac79c0581466bca67d59c5ab3e8920e1c80b9f"}
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.372513 4871 scope.go:117] "RemoveContainer" containerID="5307cbbd3c0384c43c4b7f1411ac79c0581466bca67d59c5ab3e8920e1c80b9f"
Nov 26 05:55:03 crc kubenswrapper[4871]: E1126 05:55:03.372923 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-dxbwn_openstack-operators(1cc75505-b927-488b-8a16-4fda9a1c2dca)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" podUID="1cc75505-b927-488b-8a16-4fda9a1c2dca"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.400875 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.448568 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.463211 4871 scope.go:117] "RemoveContainer" containerID="7a08aa908047d89602c6884e9a07c225e9ef97a697bc50b69575e894fa7a5e44"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.471147 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-default-user"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.482693 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-pk9k6"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.495031 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Nov 26 05:55:03 crc kubenswrapper[4871]: I1126
05:55:03.540911 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-6mpnz" Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.541142 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.621424 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.664002 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.732651 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.783365 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.792134 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.866490 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.932064 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-xhmrf" Nov 26 05:55:03 crc kubenswrapper[4871]: I1126 05:55:03.993137 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-jsnwn" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.057809 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.147835 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.212806 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.216634 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.231375 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.238770 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.263405 4871 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.271993 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.291972 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.354325 4871 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.376616 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.413236 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.418206 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.435836 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.463270 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.516110 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.531693 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-r6fps" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.631776 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.660003 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.671922 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-fnxxc" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.749302 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.784752 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.820988 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-8fhbp" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.926038 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-d6qg9" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.951572 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 26 05:55:04 crc kubenswrapper[4871]: I1126 05:55:04.998545 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.005538 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2br48" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.050887 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.054745 4871 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-69zl6" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.058812 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.074802 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.076163 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.169974 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.196310 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.220583 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.273671 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.274684 4871 scope.go:117] "RemoveContainer" containerID="1b4913dabb35fda193e0f05e91aab5c70dd4685198c87a3da4cca8ab6a266ac2" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.287142 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.287217 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.287958 4871 scope.go:117] "RemoveContainer" containerID="afaa8c9eda53548ab394745c9263b386194cd8dae3a7ff681171d9886ad0de4f" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.288231 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-wmwwk_openstack-operators(ea13fc75-b3f0-48d3-9d86-5262df2957eb)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" podUID="ea13fc75-b3f0-48d3-9d86-5262df2957eb" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.300339 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.300411 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.301561 4871 scope.go:117] "RemoveContainer" containerID="016f7c2c54f212f8cb1c8a80a867adc428a4f3c47a107fd15b56791db3a40233" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.302111 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager 
pod=designate-operator-controller-manager-955677c94-tsz49_openstack-operators(70168336-54b1-481f-b6a0-d565be07d353)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" podUID="70168336-54b1-481f-b6a0-d565be07d353" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.314477 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.315423 4871 scope.go:117] "RemoveContainer" containerID="dc8c88089b910af6abac5748429b7c27d8da141ce6b2c5bc7074c4d969095fd8" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.315639 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.315915 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-4gvxx_openstack-operators(32cd59dd-1a82-4fce-81b1-ebc8f75f1e93)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" podUID="32cd59dd-1a82-4fce-81b1-ebc8f75f1e93" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.320411 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.321199 4871 scope.go:117] "RemoveContainer" containerID="93382560a6bf53626fc30bdfd2b06a3a9aeb151f19eacb371b010cdbfd1d10a7" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.321512 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-czv5j_openstack-operators(94ce6277-5176-415b-9f4d-847a73c93723)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" podUID="94ce6277-5176-415b-9f4d-847a73c93723" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.321578 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.346507 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.347279 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.351597 4871 scope.go:117] "RemoveContainer" containerID="74d492fe0ccfa70ad5822b436273b941175b4b6cda63803b1ca86453bea3cbdb" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.352193 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-5kslm_openstack-operators(9253bdc4-d16f-42eb-8704-0965e99dfe47)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" podUID="9253bdc4-d16f-42eb-8704-0965e99dfe47" Nov 26 05:55:05 crc kubenswrapper[4871]: 
I1126 05:55:05.372819 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.374941 4871 scope.go:117] "RemoveContainer" containerID="e928a5d01b0df2a151431739d78e64fb7bf7a8623907e43210121abf106b50f5" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.383588 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.385541 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.400632 4871 scope.go:117] "RemoveContainer" containerID="74d492fe0ccfa70ad5822b436273b941175b4b6cda63803b1ca86453bea3cbdb" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.400696 4871 scope.go:117] "RemoveContainer" containerID="dc8c88089b910af6abac5748429b7c27d8da141ce6b2c5bc7074c4d969095fd8" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.400749 4871 scope.go:117] "RemoveContainer" containerID="afaa8c9eda53548ab394745c9263b386194cd8dae3a7ff681171d9886ad0de4f" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.400910 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-5kslm_openstack-operators(9253bdc4-d16f-42eb-8704-0965e99dfe47)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" podUID="9253bdc4-d16f-42eb-8704-0965e99dfe47" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.401018 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-wmwwk_openstack-operators(ea13fc75-b3f0-48d3-9d86-5262df2957eb)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" podUID="ea13fc75-b3f0-48d3-9d86-5262df2957eb" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.401269 4871 scope.go:117] "RemoveContainer" containerID="93382560a6bf53626fc30bdfd2b06a3a9aeb151f19eacb371b010cdbfd1d10a7" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.401726 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-4gvxx_openstack-operators(32cd59dd-1a82-4fce-81b1-ebc8f75f1e93)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" podUID="32cd59dd-1a82-4fce-81b1-ebc8f75f1e93" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.402814 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-czv5j_openstack-operators(94ce6277-5176-415b-9f4d-847a73c93723)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" podUID="94ce6277-5176-415b-9f4d-847a73c93723" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.407438 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 05:55:05 crc 
kubenswrapper[4871]: I1126 05:55:05.416499 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.425194 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.462306 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.482635 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.510575 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.511669 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.526439 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.527233 4871 scope.go:117] "RemoveContainer" containerID="8e5c88c9818a4ab1833f0c6d78b0c3022927db845bb1c6ef9e37848443b4c8d3" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.527551 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-lzsqj_openstack-operators(51410db5-d309-4625-8f36-02cf8f0ba419)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" podUID="51410db5-d309-4625-8f36-02cf8f0ba419" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.536137 4871 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.592217 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.593637 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.593760 4871 scope.go:117] "RemoveContainer" containerID="d4793c6d4e0ccf18be306f9d76a620f15aa14ca31ce8683e83949ca8484ad021" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.594253 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-jvztg_openstack-operators(6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" podUID="6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c" Nov 26 05:55:05 crc 
kubenswrapper[4871]: I1126 05:55:05.628521 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.629437 4871 scope.go:117] "RemoveContainer" containerID="d3e843dfb2b36b2984f6143425353086c025df5ad8c3aaf090607e269c3214ec" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.649215 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.650309 4871 scope.go:117] "RemoveContainer" containerID="5c5b1632678267f5cf169975e9c5be40531f8af2f11068fc8ce7f74fc914b6d5" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.650717 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-9xghq_openstack-operators(2c7b5f25-e4ef-4abd-ba84-61b98f194ddd)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" podUID="2c7b5f25-e4ef-4abd-ba84-61b98f194ddd" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.661454 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.661661 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.661875 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.662318 4871 scope.go:117] "RemoveContainer" containerID="4ab714acd2ecba5f59cfb4fc0025d012b4661c776f9d5ba94add92812581edd7" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.662659 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-6lpnj_openstack-operators(19a75285-dcb7-4f34-b79c-613c96d555de)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" podUID="19a75285-dcb7-4f34-b79c-613c96d555de" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.673638 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.677608 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.678349 4871 scope.go:117] "RemoveContainer" containerID="98b213ed28ee131a605d80664fee98e4f538d59be7fadc670296bfda45dc6c00" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.678617 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-9lvtk_openstack-operators(33ba2b4e-6239-43c0-a694-6495b7ae2ba3)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" 
podUID="33ba2b4e-6239-43c0-a694-6495b7ae2ba3" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.689876 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.708707 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.708757 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.709573 4871 scope.go:117] "RemoveContainer" containerID="73d05c5d7838ca23109676f65c3dfb9cdb7dd0059e28e646adbf471e17639eda" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.709827 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-shgb6_openstack-operators(6ccd73b2-dbfd-4cd6-845c-a61af4f20f96)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" podUID="6ccd73b2-dbfd-4cd6-845c-a61af4f20f96" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.728931 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.728996 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.729814 4871 scope.go:117] "RemoveContainer" containerID="5307cbbd3c0384c43c4b7f1411ac79c0581466bca67d59c5ab3e8920e1c80b9f" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.730087 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-dxbwn_openstack-operators(1cc75505-b927-488b-8a16-4fda9a1c2dca)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" podUID="1cc75505-b927-488b-8a16-4fda9a1c2dca" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.793344 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.805856 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.806777 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.810741 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.810792 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.811817 4871 scope.go:117] "RemoveContainer" containerID="65298b8f443e5f96e69187f67c045c0abd1ea806e49c09cfebbed3edd6629779" Nov 26 05:55:05 
crc kubenswrapper[4871]: E1126 05:55:05.812222 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-skx5k_openstack-operators(4b0778b1-b974-4ce6-bac4-59920ab67dd7)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" podUID="4b0778b1-b974-4ce6-bac4-59920ab67dd7" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.813540 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.831239 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.895000 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.895040 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.895803 4871 scope.go:117] "RemoveContainer" containerID="6ca5a382c8f0ffda69d2f26145501cba6b19ef3c51811c5f2f2fa44ffbcc1085" Nov 26 05:55:05 crc kubenswrapper[4871]: E1126 05:55:05.896085 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-6kccm_openstack-operators(974fe30e-68b5-42bb-9940-a2000ab315f8)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" podUID="974fe30e-68b5-42bb-9940-a2000ab315f8" Nov 26 05:55:05 crc kubenswrapper[4871]: I1126 05:55:05.960466 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-notifications-server-dockercfg-ngj58" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.045555 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.045593 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.046351 4871 scope.go:117] "RemoveContainer" containerID="2d2b2ac177577a6e0227c3c13857f6590d3a3ca1684922d91074097cd521178d" Nov 26 05:55:06 crc kubenswrapper[4871]: E1126 05:55:06.046608 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-v95x7_openstack-operators(8d32351e-c0cc-4c2a-89b2-a79b61cf632e)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" podUID="8d32351e-c0cc-4c2a-89b2-a79b61cf632e" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.066867 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.247938 4871 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"watcher-api-config-data" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.370172 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.385667 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.413408 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" event={"ID":"8c65e9f4-e3de-4bce-851a-f85c1036daa7","Type":"ContainerStarted","Data":"6b98ada13c8158e4a8ef2ec7858a9df79ca01d2a4a9b1a603467065dd86856aa"} Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.413844 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.415271 4871 generic.go:334] "Generic (PLEG): container finished" podID="250180c0-d204-44e0-83b1-64259ea3bd68" containerID="1c511dca4f4fc206956567d200e0138c02be4963624d7263a04748355e10b845" exitCode=1 Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.415364 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-v7fsr" event={"ID":"250180c0-d204-44e0-83b1-64259ea3bd68","Type":"ContainerDied","Data":"1c511dca4f4fc206956567d200e0138c02be4963624d7263a04748355e10b845"} Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.416025 4871 scope.go:117] "RemoveContainer" containerID="1c511dca4f4fc206956567d200e0138c02be4963624d7263a04748355e10b845" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.417977 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" event={"ID":"f68377a4-dee0-404b-988a-4f0673466e62","Type":"ContainerStarted","Data":"927fa8832ee209a2f54aa06a55142fde3702d8a97972becf568442c66af52d74"} Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.418477 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.420314 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" event={"ID":"4659b831-32eb-4da2-97f3-f654a299605e","Type":"ContainerStarted","Data":"657735c30874b6cd95947497e96919b154bc544c19b88e0f57900a979628a1d2"} Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.420678 4871 scope.go:117] "RemoveContainer" containerID="d4793c6d4e0ccf18be306f9d76a620f15aa14ca31ce8683e83949ca8484ad021" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.420861 4871 scope.go:117] "RemoveContainer" containerID="4ab714acd2ecba5f59cfb4fc0025d012b4661c776f9d5ba94add92812581edd7" Nov 26 05:55:06 crc kubenswrapper[4871]: E1126 05:55:06.420896 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-jvztg_openstack-operators(6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" podUID="6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c" Nov 26 05:55:06 crc kubenswrapper[4871]: E1126 
05:55:06.421108 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-6lpnj_openstack-operators(19a75285-dcb7-4f34-b79c-613c96d555de)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" podUID="19a75285-dcb7-4f34-b79c-613c96d555de" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.539781 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.724252 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.724653 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.731700 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.744001 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.754948 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.780575 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.865213 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-wnwhg" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.914588 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 05:55:06 crc kubenswrapper[4871]: I1126 05:55:06.985374 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.010112 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.051515 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.221381 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.226281 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.412944 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.416294 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.434837 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 
05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.434916 4871 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="e556fb7a67a2ab3468f23c5b9e78689aa947b6b257e9076049c6bc73006a10b1" exitCode=137 Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.439835 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-v7fsr" event={"ID":"250180c0-d204-44e0-83b1-64259ea3bd68","Type":"ContainerStarted","Data":"247f8ec160f261608960c1ecdd746a61600ddc39bbc945290e0f587e534107a2"} Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.533856 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-f9cxs" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.537561 4871 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-8wfbp" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.589493 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.589659 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.631852 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.775428 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.775499 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.775625 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.775667 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.775742 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.775746 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.775802 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.775799 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.775979 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.776421 4871 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.776449 4871 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.776466 4871 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.776485 4871 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.787388 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.796724 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.850894 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 05:55:07 crc kubenswrapper[4871]: I1126 05:55:07.878236 4871 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 05:55:08 crc kubenswrapper[4871]: I1126 05:55:08.062288 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 05:55:08 crc kubenswrapper[4871]: I1126 05:55:08.171158 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 05:55:08 crc kubenswrapper[4871]: I1126 05:55:08.453516 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 05:55:08 crc kubenswrapper[4871]: I1126 05:55:08.453626 4871 scope.go:117] "RemoveContainer" containerID="e556fb7a67a2ab3468f23c5b9e78689aa947b6b257e9076049c6bc73006a10b1" Nov 26 05:55:08 crc kubenswrapper[4871]: I1126 05:55:08.453794 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 05:55:08 crc kubenswrapper[4871]: I1126 05:55:08.520956 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 26 05:55:08 crc kubenswrapper[4871]: I1126 05:55:08.783112 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 26 05:55:08 crc kubenswrapper[4871]: I1126 05:55:08.845632 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 05:55:08 crc kubenswrapper[4871]: I1126 05:55:08.845866 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 26 05:55:09 crc kubenswrapper[4871]: I1126 05:55:09.225085 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 05:55:09 crc kubenswrapper[4871]: I1126 05:55:09.269121 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 05:55:10 crc kubenswrapper[4871]: I1126 05:55:10.507592 4871 scope.go:117] "RemoveContainer" containerID="65a47e92f235132ead33a245dde9c9bc40c3865f3032253f4754a4ad44d0946f" Nov 26 05:55:11 crc kubenswrapper[4871]: I1126 05:55:11.115712 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:55:11 crc kubenswrapper[4871]: I1126 05:55:11.116987 4871 scope.go:117] "RemoveContainer" containerID="be9349e0a50564a2f48e1d280550e525f57c5579e87284e9ef90455ce48764d5" Nov 26 05:55:11 crc kubenswrapper[4871]: E1126 05:55:11.117463 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-x5hqw_openstack-operators(06b4e3ae-765b-41c4-9334-4e33c2dc305f)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" podUID="06b4e3ae-765b-41c4-9334-4e33c2dc305f" Nov 26 05:55:11 crc kubenswrapper[4871]: I1126 05:55:11.483479 4871 generic.go:334] "Generic (PLEG): container finished" podID="c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc" containerID="9f3ef0ff30939fda3c271aa2711d0d90db6d3504c75f78d87d56ec764bd52138" exitCode=1 Nov 26 05:55:11 crc kubenswrapper[4871]: I1126 05:55:11.483561 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-gkprb" event={"ID":"c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc","Type":"ContainerDied","Data":"9f3ef0ff30939fda3c271aa2711d0d90db6d3504c75f78d87d56ec764bd52138"} Nov 26 05:55:11 crc kubenswrapper[4871]: I1126 05:55:11.484756 4871 scope.go:117] "RemoveContainer" containerID="9f3ef0ff30939fda3c271aa2711d0d90db6d3504c75f78d87d56ec764bd52138" Nov 26 05:55:11 crc kubenswrapper[4871]: I1126 05:55:11.486514 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e20fd17b-5b64-4272-9876-347ea057aa04","Type":"ContainerStarted","Data":"dd4ff2c33d649a9f7edbd7c8eea34ef6bed9bd9203220d59911e81f3d065b146"} Nov 26 05:55:11 crc kubenswrapper[4871]: I1126 05:55:11.486926 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 05:55:11 crc kubenswrapper[4871]: I1126 05:55:11.507087 4871 scope.go:117] "RemoveContainer" containerID="d50ffa6bc84bd8f313e67110c71958eac1da1e8b20a960d11d6ae8b002c18e67" Nov 26 05:55:11 crc kubenswrapper[4871]: E1126 05:55:11.507333 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-6c6pc_openstack-operators(0b2406e7-8b16-45e1-b726-645d22421af5)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" podUID="0b2406e7-8b16-45e1-b726-645d22421af5" Nov 26 05:55:11 crc kubenswrapper[4871]: I1126 05:55:11.507510 4871 scope.go:117] "RemoveContainer" containerID="5628d1980e13841fb35a22f86783a40da3176541f20ca09b1c02f7c5a1e1f7de" Nov 26 05:55:11 crc kubenswrapper[4871]: I1126 05:55:11.683869 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:55:11 crc kubenswrapper[4871]: I1126 05:55:11.684893 4871 scope.go:117] "RemoveContainer" containerID="a5d69025752f2e5a3195cc36deaa38d3372fb32e4cb749ef2b5d47cda8ec7baf" Nov 26 05:55:11 crc kubenswrapper[4871]: E1126 05:55:11.685189 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-56868586f6-2v8hx_openstack-operators(6d7ff4ed-503b-4184-8633-47598150b7f0)\"" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" podUID="6d7ff4ed-503b-4184-8633-47598150b7f0" Nov 26 05:55:12 crc kubenswrapper[4871]: I1126 05:55:12.499469 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-gkprb" 
event={"ID":"c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc","Type":"ContainerStarted","Data":"cc806ae4d8ad7479b7c2b20a74123eaa8d5ca83ae8ccb2a54b191a7b1338c818"} Nov 26 05:55:12 crc kubenswrapper[4871]: I1126 05:55:12.501576 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" event={"ID":"0f2d5628-2ad3-400c-bc77-b0251683a83a","Type":"ContainerStarted","Data":"7160015608ade4cb823a734bb8e2b734bced6695782ed16d0150b6649b90c009"} Nov 26 05:55:12 crc kubenswrapper[4871]: I1126 05:55:12.502067 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.198894 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-kkjjm"] Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.210415 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-kkjjm"] Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.219033 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-lwfp5"] Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.227800 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-6z5bf"] Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.236565 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-lwfp5"] Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.246020 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-6z5bf"] Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.255185 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-7pzbd"] Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.263796 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-7pzbd"] Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.271747 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-9kf7b"] Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.275565 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-bdpn4" Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.279840 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-9kf7b"] Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.372879 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.375197 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-clm5v" Nov 26 05:55:15 crc kubenswrapper[4871]: I1126 05:55:15.631699 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rlr55" Nov 26 05:55:16 crc kubenswrapper[4871]: I1126 05:55:16.507731 4871 scope.go:117] "RemoveContainer" containerID="93382560a6bf53626fc30bdfd2b06a3a9aeb151f19eacb371b010cdbfd1d10a7" Nov 26 05:55:16 crc kubenswrapper[4871]: E1126 05:55:16.508012 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed 
container=manager pod=glance-operator-controller-manager-589cbd6b5b-czv5j_openstack-operators(94ce6277-5176-415b-9f4d-847a73c93723)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" podUID="94ce6277-5176-415b-9f4d-847a73c93723" Nov 26 05:55:16 crc kubenswrapper[4871]: I1126 05:55:16.508046 4871 scope.go:117] "RemoveContainer" containerID="74d492fe0ccfa70ad5822b436273b941175b4b6cda63803b1ca86453bea3cbdb" Nov 26 05:55:16 crc kubenswrapper[4871]: E1126 05:55:16.508413 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-5kslm_openstack-operators(9253bdc4-d16f-42eb-8704-0965e99dfe47)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" podUID="9253bdc4-d16f-42eb-8704-0965e99dfe47" Nov 26 05:55:16 crc kubenswrapper[4871]: I1126 05:55:16.508909 4871 scope.go:117] "RemoveContainer" containerID="016f7c2c54f212f8cb1c8a80a867adc428a4f3c47a107fd15b56791db3a40233" Nov 26 05:55:16 crc kubenswrapper[4871]: E1126 05:55:16.509245 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-tsz49_openstack-operators(70168336-54b1-481f-b6a0-d565be07d353)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" podUID="70168336-54b1-481f-b6a0-d565be07d353" Nov 26 05:55:16 crc kubenswrapper[4871]: I1126 05:55:16.509627 4871 scope.go:117] "RemoveContainer" containerID="98b213ed28ee131a605d80664fee98e4f538d59be7fadc670296bfda45dc6c00" Nov 26 05:55:16 crc kubenswrapper[4871]: I1126 05:55:16.531676 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="450179f7-baf0-481d-ad0e-4d3534ee28f4" path="/var/lib/kubelet/pods/450179f7-baf0-481d-ad0e-4d3534ee28f4/volumes" Nov 26 05:55:16 crc kubenswrapper[4871]: I1126 05:55:16.533050 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6" path="/var/lib/kubelet/pods/b4385b1e-f5ee-4a06-8e3e-5d06d7fad5a6/volumes" Nov 26 05:55:16 crc kubenswrapper[4871]: I1126 05:55:16.534621 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1ff3641-2ac2-4223-b2e5-c0bd333bec1e" path="/var/lib/kubelet/pods/c1ff3641-2ac2-4223-b2e5-c0bd333bec1e/volumes" Nov 26 05:55:16 crc kubenswrapper[4871]: I1126 05:55:16.536991 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8bdb9c7-91c3-40dc-920e-6e333b18f331" path="/var/lib/kubelet/pods/c8bdb9c7-91c3-40dc-920e-6e333b18f331/volumes" Nov 26 05:55:16 crc kubenswrapper[4871]: I1126 05:55:16.538637 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e72bbf93-367f-4207-b846-b9cf819b9b4c" path="/var/lib/kubelet/pods/e72bbf93-367f-4207-b846-b9cf819b9b4c/volumes" Nov 26 05:55:17 crc kubenswrapper[4871]: I1126 05:55:17.507966 4871 scope.go:117] "RemoveContainer" containerID="5c5b1632678267f5cf169975e9c5be40531f8af2f11068fc8ce7f74fc914b6d5" Nov 26 05:55:17 crc kubenswrapper[4871]: I1126 05:55:17.508381 4871 scope.go:117] "RemoveContainer" containerID="d4793c6d4e0ccf18be306f9d76a620f15aa14ca31ce8683e83949ca8484ad021" Nov 26 05:55:17 crc kubenswrapper[4871]: I1126 05:55:17.508423 4871 scope.go:117] "RemoveContainer" containerID="dc8c88089b910af6abac5748429b7c27d8da141ce6b2c5bc7074c4d969095fd8" Nov 
26 05:55:17 crc kubenswrapper[4871]: I1126 05:55:17.508554 4871 scope.go:117] "RemoveContainer" containerID="65298b8f443e5f96e69187f67c045c0abd1ea806e49c09cfebbed3edd6629779" Nov 26 05:55:17 crc kubenswrapper[4871]: E1126 05:55:17.508863 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-4gvxx_openstack-operators(32cd59dd-1a82-4fce-81b1-ebc8f75f1e93)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" podUID="32cd59dd-1a82-4fce-81b1-ebc8f75f1e93" Nov 26 05:55:17 crc kubenswrapper[4871]: E1126 05:55:17.509144 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-skx5k_openstack-operators(4b0778b1-b974-4ce6-bac4-59920ab67dd7)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" podUID="4b0778b1-b974-4ce6-bac4-59920ab67dd7" Nov 26 05:55:17 crc kubenswrapper[4871]: I1126 05:55:17.566517 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" event={"ID":"33ba2b4e-6239-43c0-a694-6495b7ae2ba3","Type":"ContainerStarted","Data":"b4c9f9e64f5dd9074ed3ac266567169885958a9a093effefaa4f4e6be62a6885"} Nov 26 05:55:17 crc kubenswrapper[4871]: I1126 05:55:17.568102 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" Nov 26 05:55:18 crc kubenswrapper[4871]: I1126 05:55:18.507957 4871 scope.go:117] "RemoveContainer" containerID="6ca5a382c8f0ffda69d2f26145501cba6b19ef3c51811c5f2f2fa44ffbcc1085" Nov 26 05:55:18 crc kubenswrapper[4871]: I1126 05:55:18.508362 4871 scope.go:117] "RemoveContainer" containerID="2d2b2ac177577a6e0227c3c13857f6590d3a3ca1684922d91074097cd521178d" Nov 26 05:55:18 crc kubenswrapper[4871]: E1126 05:55:18.508624 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-6kccm_openstack-operators(974fe30e-68b5-42bb-9940-a2000ab315f8)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" podUID="974fe30e-68b5-42bb-9940-a2000ab315f8" Nov 26 05:55:18 crc kubenswrapper[4871]: E1126 05:55:18.508855 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-v95x7_openstack-operators(8d32351e-c0cc-4c2a-89b2-a79b61cf632e)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" podUID="8d32351e-c0cc-4c2a-89b2-a79b61cf632e" Nov 26 05:55:18 crc kubenswrapper[4871]: I1126 05:55:18.580657 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" event={"ID":"2c7b5f25-e4ef-4abd-ba84-61b98f194ddd","Type":"ContainerStarted","Data":"cd23e58142af64b2284fb9897a90304d413fed86ffa291afeb584fa89eec1905"} Nov 26 05:55:18 crc kubenswrapper[4871]: I1126 05:55:18.580948 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" Nov 26 05:55:18 crc kubenswrapper[4871]: I1126 05:55:18.585365 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" event={"ID":"6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c","Type":"ContainerStarted","Data":"215529265bffe47162300c9fa7312383075c4d667f5803becedd007ceffcc4a6"} Nov 26 05:55:18 crc kubenswrapper[4871]: I1126 05:55:18.585690 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" Nov 26 05:55:19 crc kubenswrapper[4871]: I1126 05:55:19.065855 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 05:55:19 crc kubenswrapper[4871]: I1126 05:55:19.508063 4871 scope.go:117] "RemoveContainer" containerID="73d05c5d7838ca23109676f65c3dfb9cdb7dd0059e28e646adbf471e17639eda" Nov 26 05:55:19 crc kubenswrapper[4871]: I1126 05:55:19.508217 4871 scope.go:117] "RemoveContainer" containerID="5307cbbd3c0384c43c4b7f1411ac79c0581466bca67d59c5ab3e8920e1c80b9f" Nov 26 05:55:19 crc kubenswrapper[4871]: E1126 05:55:19.508634 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-dxbwn_openstack-operators(1cc75505-b927-488b-8a16-4fda9a1c2dca)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" podUID="1cc75505-b927-488b-8a16-4fda9a1c2dca" Nov 26 05:55:20 crc kubenswrapper[4871]: I1126 05:55:20.507720 4871 scope.go:117] "RemoveContainer" containerID="afaa8c9eda53548ab394745c9263b386194cd8dae3a7ff681171d9886ad0de4f" Nov 26 05:55:20 crc kubenswrapper[4871]: I1126 05:55:20.508174 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:55:20 crc kubenswrapper[4871]: I1126 05:55:20.508342 4871 scope.go:117] "RemoveContainer" containerID="8e5c88c9818a4ab1833f0c6d78b0c3022927db845bb1c6ef9e37848443b4c8d3" Nov 26 05:55:20 crc kubenswrapper[4871]: I1126 05:55:20.508452 4871 scope.go:117] "RemoveContainer" containerID="4ab714acd2ecba5f59cfb4fc0025d012b4661c776f9d5ba94add92812581edd7" Nov 26 05:55:20 crc kubenswrapper[4871]: E1126 05:55:20.508687 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:55:20 crc kubenswrapper[4871]: E1126 05:55:20.508973 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-6lpnj_openstack-operators(19a75285-dcb7-4f34-b79c-613c96d555de)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" podUID="19a75285-dcb7-4f34-b79c-613c96d555de" Nov 26 05:55:20 crc kubenswrapper[4871]: I1126 05:55:20.607377 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" 
event={"ID":"6ccd73b2-dbfd-4cd6-845c-a61af4f20f96","Type":"ContainerStarted","Data":"4a0aefa82db27d71864a273b87c04b505de2cbe35fc470a11b65b9413c948aa9"} Nov 26 05:55:20 crc kubenswrapper[4871]: I1126 05:55:20.609147 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.114697 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.115661 4871 scope.go:117] "RemoveContainer" containerID="be9349e0a50564a2f48e1d280550e525f57c5579e87284e9ef90455ce48764d5" Nov 26 05:55:21 crc kubenswrapper[4871]: E1126 05:55:21.115986 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-x5hqw_openstack-operators(06b4e3ae-765b-41c4-9334-4e33c2dc305f)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" podUID="06b4e3ae-765b-41c4-9334-4e33c2dc305f" Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.542923 4871 scope.go:117] "RemoveContainer" containerID="7ac7ebd344eab8892ece8a92a1b84eb468249911332dc50689e62648d49b49c7" Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.586505 4871 scope.go:117] "RemoveContainer" containerID="bb34da1a086146aa7815a37eef79a6585c3a58342bfd9113c0a6818bf15c248f" Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.618505 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" event={"ID":"51410db5-d309-4625-8f36-02cf8f0ba419","Type":"ContainerStarted","Data":"b190776a21e44b82d9fc72433725934efb7bdee79e3448060bf0a2c29b409bab"} Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.618719 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.624819 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" event={"ID":"ea13fc75-b3f0-48d3-9d86-5262df2957eb","Type":"ContainerStarted","Data":"a7be8283087ef15ab55493af0ffb36ecff9e6305cdddc57217518f8c296041eb"} Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.625109 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.626751 4871 scope.go:117] "RemoveContainer" containerID="5d01001680921a32f895fff185bc882d16b7258d62ddfedd545f48e12e45a0fc" Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.683729 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.684508 4871 scope.go:117] "RemoveContainer" containerID="a5d69025752f2e5a3195cc36deaa38d3372fb32e4cb749ef2b5d47cda8ec7baf" Nov 26 05:55:21 crc kubenswrapper[4871]: E1126 05:55:21.684827 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager 
pod=openstack-operator-controller-manager-56868586f6-2v8hx_openstack-operators(6d7ff4ed-503b-4184-8633-47598150b7f0)\"" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" podUID="6d7ff4ed-503b-4184-8633-47598150b7f0" Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.691496 4871 scope.go:117] "RemoveContainer" containerID="7bac7bd9dc077623d52482f17b20a5571b728ffdb858f67e1e4e5e42bda7a265" Nov 26 05:55:21 crc kubenswrapper[4871]: I1126 05:55:21.727843 4871 scope.go:117] "RemoveContainer" containerID="c935206db6eafa84766722bfdd4c923595613409c3a25fe5b2ea4a43d6e58967" Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.050569 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-38ab-account-create-update-5728z"] Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.065773 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-38ab-account-create-update-5728z"] Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.078276 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-5174-account-create-update-j9rcn"] Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.088687 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-fg97c"] Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.096233 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-rswx2"] Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.103923 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-rwvqt"] Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.113931 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-rwvqt"] Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.122505 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-5174-account-create-update-j9rcn"] Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.130516 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-fg97c"] Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.139413 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-rswx2"] Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.558079 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50e575b9-71c6-466e-993f-12c04b2834db" path="/var/lib/kubelet/pods/50e575b9-71c6-466e-993f-12c04b2834db/volumes" Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.559625 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc" path="/var/lib/kubelet/pods/7da43cea-c9cb-4c44-b0cb-f0895ef1a7fc/volumes" Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.560214 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b91e09cf-5ae8-4288-a212-d52274d5ef05" path="/var/lib/kubelet/pods/b91e09cf-5ae8-4288-a212-d52274d5ef05/volumes" Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.560891 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2" path="/var/lib/kubelet/pods/cc22a7bb-6c3c-44cb-b37c-7d31cab8b3d2/volumes" Nov 26 05:55:22 crc kubenswrapper[4871]: I1126 05:55:22.561924 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e161c874-eca9-4f95-8419-660b27e5d21e" path="/var/lib/kubelet/pods/e161c874-eca9-4f95-8419-660b27e5d21e/volumes" Nov 26 05:55:23 
crc kubenswrapper[4871]: I1126 05:55:23.032995 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-c975-account-create-update-767bz"] Nov 26 05:55:23 crc kubenswrapper[4871]: I1126 05:55:23.043319 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-c975-account-create-update-767bz"] Nov 26 05:55:24 crc kubenswrapper[4871]: I1126 05:55:24.522718 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32032cf4-30d8-45c4-a12e-f1e79eda1c52" path="/var/lib/kubelet/pods/32032cf4-30d8-45c4-a12e-f1e79eda1c52/volumes" Nov 26 05:55:25 crc kubenswrapper[4871]: I1126 05:55:25.290566 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-wmwwk" Nov 26 05:55:25 crc kubenswrapper[4871]: I1126 05:55:25.506820 4871 scope.go:117] "RemoveContainer" containerID="d50ffa6bc84bd8f313e67110c71958eac1da1e8b20a960d11d6ae8b002c18e67" Nov 26 05:55:25 crc kubenswrapper[4871]: I1126 05:55:25.528733 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-lzsqj" Nov 26 05:55:25 crc kubenswrapper[4871]: I1126 05:55:25.595217 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-jvztg" Nov 26 05:55:25 crc kubenswrapper[4871]: I1126 05:55:25.652933 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-9xghq" Nov 26 05:55:25 crc kubenswrapper[4871]: I1126 05:55:25.677058 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-9lvtk" Nov 26 05:55:25 crc kubenswrapper[4871]: I1126 05:55:25.714091 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-shgb6" Nov 26 05:55:26 crc kubenswrapper[4871]: I1126 05:55:26.721514 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-6c6pc" event={"ID":"0b2406e7-8b16-45e1-b726-645d22421af5","Type":"ContainerStarted","Data":"e2f984f5086826bae810d8372afb05932b30d83e6b9e2d7b64fc8416a0735057"} Nov 26 05:55:28 crc kubenswrapper[4871]: I1126 05:55:28.507712 4871 scope.go:117] "RemoveContainer" containerID="74d492fe0ccfa70ad5822b436273b941175b4b6cda63803b1ca86453bea3cbdb" Nov 26 05:55:28 crc kubenswrapper[4871]: I1126 05:55:28.751649 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" event={"ID":"9253bdc4-d16f-42eb-8704-0965e99dfe47","Type":"ContainerStarted","Data":"a893aeb774a6963f0693de51eca9508944a782a4d748f69f9fafb00492349ae5"} Nov 26 05:55:28 crc kubenswrapper[4871]: I1126 05:55:28.752521 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" Nov 26 05:55:29 crc kubenswrapper[4871]: I1126 05:55:29.507059 4871 scope.go:117] "RemoveContainer" containerID="6ca5a382c8f0ffda69d2f26145501cba6b19ef3c51811c5f2f2fa44ffbcc1085" Nov 26 05:55:29 crc kubenswrapper[4871]: I1126 05:55:29.507127 4871 scope.go:117] "RemoveContainer" containerID="2d2b2ac177577a6e0227c3c13857f6590d3a3ca1684922d91074097cd521178d" Nov 26 05:55:30 crc kubenswrapper[4871]: I1126 05:55:30.507024 
4871 scope.go:117] "RemoveContainer" containerID="5307cbbd3c0384c43c4b7f1411ac79c0581466bca67d59c5ab3e8920e1c80b9f" Nov 26 05:55:30 crc kubenswrapper[4871]: I1126 05:55:30.507553 4871 scope.go:117] "RemoveContainer" containerID="dc8c88089b910af6abac5748429b7c27d8da141ce6b2c5bc7074c4d969095fd8" Nov 26 05:55:30 crc kubenswrapper[4871]: I1126 05:55:30.795096 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" event={"ID":"974fe30e-68b5-42bb-9940-a2000ab315f8","Type":"ContainerStarted","Data":"2ef645dcb47f74db65834b313c943b9603593669fbeb16b099decb8a8e84667d"} Nov 26 05:55:30 crc kubenswrapper[4871]: I1126 05:55:30.795627 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" Nov 26 05:55:30 crc kubenswrapper[4871]: I1126 05:55:30.801404 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" event={"ID":"8d32351e-c0cc-4c2a-89b2-a79b61cf632e","Type":"ContainerStarted","Data":"a494ffa82b7d1ada621f0fc8152106361933cf2efbf93d6f1072814d5d330ea6"} Nov 26 05:55:30 crc kubenswrapper[4871]: I1126 05:55:30.802936 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" Nov 26 05:55:31 crc kubenswrapper[4871]: I1126 05:55:31.507203 4871 scope.go:117] "RemoveContainer" containerID="4ab714acd2ecba5f59cfb4fc0025d012b4661c776f9d5ba94add92812581edd7" Nov 26 05:55:31 crc kubenswrapper[4871]: I1126 05:55:31.507374 4871 scope.go:117] "RemoveContainer" containerID="93382560a6bf53626fc30bdfd2b06a3a9aeb151f19eacb371b010cdbfd1d10a7" Nov 26 05:55:31 crc kubenswrapper[4871]: I1126 05:55:31.507840 4871 scope.go:117] "RemoveContainer" containerID="65298b8f443e5f96e69187f67c045c0abd1ea806e49c09cfebbed3edd6629779" Nov 26 05:55:31 crc kubenswrapper[4871]: I1126 05:55:31.507890 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:55:31 crc kubenswrapper[4871]: I1126 05:55:31.508040 4871 scope.go:117] "RemoveContainer" containerID="016f7c2c54f212f8cb1c8a80a867adc428a4f3c47a107fd15b56791db3a40233" Nov 26 05:55:31 crc kubenswrapper[4871]: E1126 05:55:31.508277 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:55:31 crc kubenswrapper[4871]: I1126 05:55:31.816242 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" event={"ID":"32cd59dd-1a82-4fce-81b1-ebc8f75f1e93","Type":"ContainerStarted","Data":"d06510de54c0dc613903397d0bb5870fdbd33fd2ec79f126f560987b0cf876f2"} Nov 26 05:55:31 crc kubenswrapper[4871]: I1126 05:55:31.816665 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" Nov 26 05:55:31 crc kubenswrapper[4871]: I1126 05:55:31.819694 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" event={"ID":"1cc75505-b927-488b-8a16-4fda9a1c2dca","Type":"ContainerStarted","Data":"6e9dc737eec2df707789c121b96c3800b2867dfa201c350b8ef09bff32ac0ddc"} Nov 26 05:55:32 crc kubenswrapper[4871]: I1126 05:55:32.833679 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" event={"ID":"70168336-54b1-481f-b6a0-d565be07d353","Type":"ContainerStarted","Data":"20b1bc8c59cfc1463ab8e1b46bb7ee8b517a5375748db41adfd7b95af34bccf0"} Nov 26 05:55:32 crc kubenswrapper[4871]: I1126 05:55:32.834199 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" Nov 26 05:55:32 crc kubenswrapper[4871]: I1126 05:55:32.836283 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" event={"ID":"94ce6277-5176-415b-9f4d-847a73c93723","Type":"ContainerStarted","Data":"e039d4aaef7900c1a71bc3bebda657f81cea7d28d02b92593493e821ab12910d"} Nov 26 05:55:32 crc kubenswrapper[4871]: I1126 05:55:32.836469 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" Nov 26 05:55:32 crc kubenswrapper[4871]: I1126 05:55:32.838656 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" event={"ID":"19a75285-dcb7-4f34-b79c-613c96d555de","Type":"ContainerStarted","Data":"1e7e349c2022f3aa0b712c41fa23b8fb2bea1fb5b99dcdab6d05becf6e0e5d08"} Nov 26 05:55:32 crc kubenswrapper[4871]: I1126 05:55:32.838946 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" Nov 26 05:55:32 crc kubenswrapper[4871]: I1126 05:55:32.840873 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" event={"ID":"4b0778b1-b974-4ce6-bac4-59920ab67dd7","Type":"ContainerStarted","Data":"bc0b473e83cc9e48080b550fa7d6d0a84f0ffb5846532002e7752d20044e9194"} Nov 26 05:55:32 crc kubenswrapper[4871]: I1126 05:55:32.841139 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" Nov 26 05:55:33 crc kubenswrapper[4871]: I1126 05:55:33.854288 4871 generic.go:334] "Generic (PLEG): container finished" podID="65ad1a09-cc57-45f2-9a13-2d83b8b8221c" containerID="0bafc9f9bfd0d150338e298bea926cb02c244da107018860cf8a793e5871c5e4" exitCode=0 Nov 26 05:55:33 crc kubenswrapper[4871]: I1126 05:55:33.854709 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" event={"ID":"65ad1a09-cc57-45f2-9a13-2d83b8b8221c","Type":"ContainerDied","Data":"0bafc9f9bfd0d150338e298bea926cb02c244da107018860cf8a793e5871c5e4"} Nov 26 05:55:33 crc kubenswrapper[4871]: I1126 05:55:33.855744 4871 scope.go:117] "RemoveContainer" containerID="0bafc9f9bfd0d150338e298bea926cb02c244da107018860cf8a793e5871c5e4" Nov 26 05:55:34 crc kubenswrapper[4871]: I1126 05:55:34.864340 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" 
event={"ID":"65ad1a09-cc57-45f2-9a13-2d83b8b8221c","Type":"ContainerStarted","Data":"c1c347b81a697f698dae04a6df77a8d5640167c214c0d4ddb19e6b87cd84fdc7"} Nov 26 05:55:34 crc kubenswrapper[4871]: I1126 05:55:34.865234 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:55:34 crc kubenswrapper[4871]: I1126 05:55:34.867801 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-qks66" Nov 26 05:55:35 crc kubenswrapper[4871]: I1126 05:55:35.317276 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-4gvxx" Nov 26 05:55:35 crc kubenswrapper[4871]: I1126 05:55:35.350134 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-5kslm" Nov 26 05:55:35 crc kubenswrapper[4871]: I1126 05:55:35.507295 4871 scope.go:117] "RemoveContainer" containerID="be9349e0a50564a2f48e1d280550e525f57c5579e87284e9ef90455ce48764d5" Nov 26 05:55:35 crc kubenswrapper[4871]: I1126 05:55:35.728983 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" Nov 26 05:55:35 crc kubenswrapper[4871]: I1126 05:55:35.732350 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-dxbwn" Nov 26 05:55:35 crc kubenswrapper[4871]: I1126 05:55:35.876161 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" event={"ID":"06b4e3ae-765b-41c4-9334-4e33c2dc305f","Type":"ContainerStarted","Data":"01765feabf987808cb9fe8f3b39f505d62a6b5e3619f0a1c6834d80e59daa09a"} Nov 26 05:55:35 crc kubenswrapper[4871]: I1126 05:55:35.896089 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6kccm" Nov 26 05:55:36 crc kubenswrapper[4871]: I1126 05:55:36.047666 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-v95x7" Nov 26 05:55:36 crc kubenswrapper[4871]: I1126 05:55:36.508181 4871 scope.go:117] "RemoveContainer" containerID="a5d69025752f2e5a3195cc36deaa38d3372fb32e4cb749ef2b5d47cda8ec7baf" Nov 26 05:55:36 crc kubenswrapper[4871]: I1126 05:55:36.887583 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" event={"ID":"6d7ff4ed-503b-4184-8633-47598150b7f0","Type":"ContainerStarted","Data":"67e36026d4cbf731cb2c3163e2a894414be9487a6cc0edf2e120457dcfe82db9"} Nov 26 05:55:36 crc kubenswrapper[4871]: I1126 05:55:36.887849 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:55:41 crc kubenswrapper[4871]: I1126 05:55:41.115694 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:55:41 crc kubenswrapper[4871]: I1126 05:55:41.124244 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x5hqw" Nov 26 05:55:41 
crc kubenswrapper[4871]: I1126 05:55:41.691211 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-56868586f6-2v8hx" Nov 26 05:55:45 crc kubenswrapper[4871]: I1126 05:55:45.306911 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-tsz49" Nov 26 05:55:45 crc kubenswrapper[4871]: I1126 05:55:45.324458 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-czv5j" Nov 26 05:55:45 crc kubenswrapper[4871]: I1126 05:55:45.507257 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:55:45 crc kubenswrapper[4871]: E1126 05:55:45.507593 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:55:45 crc kubenswrapper[4871]: I1126 05:55:45.663832 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-6lpnj" Nov 26 05:55:45 crc kubenswrapper[4871]: I1126 05:55:45.814131 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-skx5k" Nov 26 05:55:47 crc kubenswrapper[4871]: I1126 05:55:47.556664 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-645b9949f7-48k8g" Nov 26 05:55:55 crc kubenswrapper[4871]: I1126 05:55:55.068638 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-tfgn7"] Nov 26 05:55:55 crc kubenswrapper[4871]: I1126 05:55:55.077013 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-tfgn7"] Nov 26 05:55:56 crc kubenswrapper[4871]: I1126 05:55:56.527807 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6393db54-1c1f-47bc-8669-e56ed280db54" path="/var/lib/kubelet/pods/6393db54-1c1f-47bc-8669-e56ed280db54/volumes" Nov 26 05:56:00 crc kubenswrapper[4871]: I1126 05:56:00.507242 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:56:00 crc kubenswrapper[4871]: E1126 05:56:00.509047 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.543694 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl"] Nov 26 05:56:10 crc kubenswrapper[4871]: E1126 05:56:10.546312 4871 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" containerName="installer" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.546333 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" containerName="installer" Nov 26 05:56:10 crc kubenswrapper[4871]: E1126 05:56:10.546355 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.546364 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 05:56:10 crc kubenswrapper[4871]: E1126 05:56:10.546395 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.546407 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.546685 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.546710 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="283281c5-37d5-4b0f-9824-13ffec29ddaf" containerName="installer" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.546741 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.547570 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.550005 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.550107 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.550175 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.550347 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.555507 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl"] Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.678933 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g58d9\" (UniqueName: \"kubernetes.io/projected/ff26f53b-8fe4-4dde-b475-348beb78046d-kube-api-access-g58d9\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.678984 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.679093 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.780696 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g58d9\" (UniqueName: \"kubernetes.io/projected/ff26f53b-8fe4-4dde-b475-348beb78046d-kube-api-access-g58d9\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.780740 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.780771 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-ssh-key\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.787161 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.787829 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.803054 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g58d9\" (UniqueName: \"kubernetes.io/projected/ff26f53b-8fe4-4dde-b475-348beb78046d-kube-api-access-g58d9\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:56:10 crc kubenswrapper[4871]: I1126 05:56:10.868289 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:56:11 crc kubenswrapper[4871]: I1126 05:56:11.489833 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl"] Nov 26 05:56:12 crc kubenswrapper[4871]: I1126 05:56:12.269389 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" event={"ID":"ff26f53b-8fe4-4dde-b475-348beb78046d","Type":"ContainerStarted","Data":"1d4526db71323df73b3bf3a8708fc9e7c52909710307a297ba503c372f427813"} Nov 26 05:56:13 crc kubenswrapper[4871]: I1126 05:56:13.280826 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" event={"ID":"ff26f53b-8fe4-4dde-b475-348beb78046d","Type":"ContainerStarted","Data":"bf189b94546228c8fa69793adabb30522f0c2abbda3dac74fe31541762db5021"} Nov 26 05:56:13 crc kubenswrapper[4871]: I1126 05:56:13.303667 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" podStartSLOduration=2.79584358 podStartE2EDuration="3.303646144s" podCreationTimestamp="2025-11-26 05:56:10 +0000 UTC" firstStartedPulling="2025-11-26 05:56:11.514039592 +0000 UTC m=+1829.697091188" lastFinishedPulling="2025-11-26 05:56:12.021842166 +0000 UTC m=+1830.204893752" observedRunningTime="2025-11-26 05:56:13.297734087 +0000 UTC m=+1831.480785673" watchObservedRunningTime="2025-11-26 05:56:13.303646144 +0000 UTC m=+1831.486697740" Nov 26 05:56:15 crc kubenswrapper[4871]: I1126 05:56:15.508397 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:56:15 crc kubenswrapper[4871]: E1126 05:56:15.509019 4871 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 05:56:19 crc kubenswrapper[4871]: I1126 05:56:19.057895 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-l92kl"] Nov 26 05:56:19 crc kubenswrapper[4871]: I1126 05:56:19.072375 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-l92kl"] Nov 26 05:56:20 crc kubenswrapper[4871]: I1126 05:56:20.032222 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-22j7b"] Nov 26 05:56:20 crc kubenswrapper[4871]: I1126 05:56:20.046645 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-22j7b"] Nov 26 05:56:20 crc kubenswrapper[4871]: I1126 05:56:20.534280 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f388a8a-48e4-4503-a842-23c380a1c649" path="/var/lib/kubelet/pods/8f388a8a-48e4-4503-a842-23c380a1c649/volumes" Nov 26 05:56:20 crc kubenswrapper[4871]: I1126 05:56:20.535248 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc3703e8-ee84-4c63-983b-a1f0ea6976f1" path="/var/lib/kubelet/pods/fc3703e8-ee84-4c63-983b-a1f0ea6976f1/volumes" Nov 26 05:56:21 crc kubenswrapper[4871]: I1126 05:56:21.919386 4871 scope.go:117] "RemoveContainer" containerID="5b086dbe6d815cb7b39d7cbbe9bbf31136fc2199d8f5f03f88cf1d73066649db" Nov 26 05:56:21 crc kubenswrapper[4871]: I1126 05:56:21.944644 4871 scope.go:117] "RemoveContainer" containerID="c20c3c89269396c982f3c2b18f81f453cf0b902607d7efaafef8ce3eed1c4acd" Nov 26 05:56:22 crc kubenswrapper[4871]: I1126 05:56:22.006492 4871 scope.go:117] "RemoveContainer" containerID="51e4c2a3c34347cc9248d4b1233df6cfaf2e3cb0192befb46dc1e2d0dfb1190a" Nov 26 05:56:22 crc kubenswrapper[4871]: I1126 05:56:22.066954 4871 scope.go:117] "RemoveContainer" containerID="53ab1f13208b324ec2c352f7b58f323415a3adda284c7b3e7124f03afa93fb3e" Nov 26 05:56:22 crc kubenswrapper[4871]: I1126 05:56:22.149939 4871 scope.go:117] "RemoveContainer" containerID="6849d4a873e1e5d2caa159f5a3c1ade14bdc815457cee33ed53a41427dd223be" Nov 26 05:56:22 crc kubenswrapper[4871]: I1126 05:56:22.182254 4871 scope.go:117] "RemoveContainer" containerID="92a185927ee0ba29556a3c7ac25362b8286f1ffca3c6c6170f225b7bdd89316e" Nov 26 05:56:22 crc kubenswrapper[4871]: I1126 05:56:22.237282 4871 scope.go:117] "RemoveContainer" containerID="0b3f41dc451b2d5092df4530c819384ade2cd6e8bcc1cc5cc6ed1ee49c17ce9e" Nov 26 05:56:22 crc kubenswrapper[4871]: I1126 05:56:22.276815 4871 scope.go:117] "RemoveContainer" containerID="7449fc98f53ce5ac1e6441d6e3940f69dc80e9702a7a8c5beb1f1a588f708c39" Nov 26 05:56:22 crc kubenswrapper[4871]: I1126 05:56:22.319943 4871 scope.go:117] "RemoveContainer" containerID="173ab77996bfa5ec292feb115b28f5e6425124aff186a3bb2422bbca36d975df" Nov 26 05:56:29 crc kubenswrapper[4871]: I1126 05:56:29.508662 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:56:30 crc kubenswrapper[4871]: I1126 05:56:30.501582 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" 
event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"690714808ddb6f01775f9048b33eb3edf7f436bcc65bc1eb71c8be346f112ef0"} Nov 26 05:57:02 crc kubenswrapper[4871]: I1126 05:57:02.063451 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-qz2zl"] Nov 26 05:57:02 crc kubenswrapper[4871]: I1126 05:57:02.074963 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-qz2zl"] Nov 26 05:57:02 crc kubenswrapper[4871]: I1126 05:57:02.521551 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="105ddc2e-1b44-4e32-ba25-1582b633faaa" path="/var/lib/kubelet/pods/105ddc2e-1b44-4e32-ba25-1582b633faaa/volumes" Nov 26 05:57:22 crc kubenswrapper[4871]: I1126 05:57:22.556065 4871 scope.go:117] "RemoveContainer" containerID="23edf9cdd99ee1bd02e0b63ed593e8d29079a97c3186e93a8486de34adfbd3d4" Nov 26 05:57:24 crc kubenswrapper[4871]: I1126 05:57:24.853718 4871 generic.go:334] "Generic (PLEG): container finished" podID="ff26f53b-8fe4-4dde-b475-348beb78046d" containerID="bf189b94546228c8fa69793adabb30522f0c2abbda3dac74fe31541762db5021" exitCode=0 Nov 26 05:57:24 crc kubenswrapper[4871]: I1126 05:57:24.853844 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" event={"ID":"ff26f53b-8fe4-4dde-b475-348beb78046d","Type":"ContainerDied","Data":"bf189b94546228c8fa69793adabb30522f0c2abbda3dac74fe31541762db5021"} Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.301214 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.470309 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-inventory\") pod \"ff26f53b-8fe4-4dde-b475-348beb78046d\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.470454 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g58d9\" (UniqueName: \"kubernetes.io/projected/ff26f53b-8fe4-4dde-b475-348beb78046d-kube-api-access-g58d9\") pod \"ff26f53b-8fe4-4dde-b475-348beb78046d\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.470661 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-ssh-key\") pod \"ff26f53b-8fe4-4dde-b475-348beb78046d\" (UID: \"ff26f53b-8fe4-4dde-b475-348beb78046d\") " Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.479858 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff26f53b-8fe4-4dde-b475-348beb78046d-kube-api-access-g58d9" (OuterVolumeSpecName: "kube-api-access-g58d9") pod "ff26f53b-8fe4-4dde-b475-348beb78046d" (UID: "ff26f53b-8fe4-4dde-b475-348beb78046d"). InnerVolumeSpecName "kube-api-access-g58d9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.515454 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-inventory" (OuterVolumeSpecName: "inventory") pod "ff26f53b-8fe4-4dde-b475-348beb78046d" (UID: "ff26f53b-8fe4-4dde-b475-348beb78046d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.539758 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ff26f53b-8fe4-4dde-b475-348beb78046d" (UID: "ff26f53b-8fe4-4dde-b475-348beb78046d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.576135 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g58d9\" (UniqueName: \"kubernetes.io/projected/ff26f53b-8fe4-4dde-b475-348beb78046d-kube-api-access-g58d9\") on node \"crc\" DevicePath \"\"" Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.576206 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.576233 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ff26f53b-8fe4-4dde-b475-348beb78046d-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.884717 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" event={"ID":"ff26f53b-8fe4-4dde-b475-348beb78046d","Type":"ContainerDied","Data":"1d4526db71323df73b3bf3a8708fc9e7c52909710307a297ba503c372f427813"} Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.884997 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d4526db71323df73b3bf3a8708fc9e7c52909710307a297ba503c372f427813" Nov 26 05:57:26 crc kubenswrapper[4871]: I1126 05:57:26.884779 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.019118 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk"] Nov 26 05:57:27 crc kubenswrapper[4871]: E1126 05:57:27.020085 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff26f53b-8fe4-4dde-b475-348beb78046d" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.020141 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff26f53b-8fe4-4dde-b475-348beb78046d" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.020745 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff26f53b-8fe4-4dde-b475-348beb78046d" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.022278 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.025198 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.025463 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.025616 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.025953 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.046276 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk"] Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.189879 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.190172 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.190327 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqgm7\" (UniqueName: \"kubernetes.io/projected/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-kube-api-access-cqgm7\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.291551 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqgm7\" (UniqueName: \"kubernetes.io/projected/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-kube-api-access-cqgm7\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.291593 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.291708 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-inventory\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.297641 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.299240 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.320910 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqgm7\" (UniqueName: \"kubernetes.io/projected/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-kube-api-access-cqgm7\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.345431 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.696601 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.702311 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk"] Nov 26 05:57:27 crc kubenswrapper[4871]: I1126 05:57:27.910115 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" event={"ID":"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4","Type":"ContainerStarted","Data":"81c963d8e1cd3c3d557e0924b7f7eb17dbad69c81271cc5469c9f04db880c50c"} Nov 26 05:57:28 crc kubenswrapper[4871]: I1126 05:57:28.923117 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" event={"ID":"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4","Type":"ContainerStarted","Data":"9258e04c005b30a2a689140255eaf7e77c81155125ba5a97a3a469462b81bde3"} Nov 26 05:57:28 crc kubenswrapper[4871]: I1126 05:57:28.960294 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" podStartSLOduration=2.499018905 podStartE2EDuration="2.96024261s" podCreationTimestamp="2025-11-26 05:57:26 +0000 UTC" firstStartedPulling="2025-11-26 05:57:27.696288438 +0000 UTC m=+1905.879340024" lastFinishedPulling="2025-11-26 05:57:28.157512143 +0000 UTC m=+1906.340563729" observedRunningTime="2025-11-26 05:57:28.943907213 +0000 UTC m=+1907.126958849" watchObservedRunningTime="2025-11-26 05:57:28.96024261 +0000 UTC m=+1907.143294226" Nov 26 05:57:33 crc kubenswrapper[4871]: I1126 05:57:33.978636 4871 generic.go:334] "Generic (PLEG): container finished" 
podID="a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4" containerID="9258e04c005b30a2a689140255eaf7e77c81155125ba5a97a3a469462b81bde3" exitCode=0 Nov 26 05:57:33 crc kubenswrapper[4871]: I1126 05:57:33.978711 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" event={"ID":"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4","Type":"ContainerDied","Data":"9258e04c005b30a2a689140255eaf7e77c81155125ba5a97a3a469462b81bde3"} Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.502317 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.680622 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-ssh-key\") pod \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.680786 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-inventory\") pod \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.680863 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqgm7\" (UniqueName: \"kubernetes.io/projected/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-kube-api-access-cqgm7\") pod \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\" (UID: \"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4\") " Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.686490 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-kube-api-access-cqgm7" (OuterVolumeSpecName: "kube-api-access-cqgm7") pod "a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4" (UID: "a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4"). InnerVolumeSpecName "kube-api-access-cqgm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.709897 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4" (UID: "a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.727279 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-inventory" (OuterVolumeSpecName: "inventory") pod "a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4" (UID: "a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.782926 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.782965 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.782979 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqgm7\" (UniqueName: \"kubernetes.io/projected/a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4-kube-api-access-cqgm7\") on node \"crc\" DevicePath \"\"" Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.999422 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" event={"ID":"a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4","Type":"ContainerDied","Data":"81c963d8e1cd3c3d557e0924b7f7eb17dbad69c81271cc5469c9f04db880c50c"} Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.999464 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81c963d8e1cd3c3d557e0924b7f7eb17dbad69c81271cc5469c9f04db880c50c" Nov 26 05:57:35 crc kubenswrapper[4871]: I1126 05:57:35.999546 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.115122 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn"] Nov 26 05:57:36 crc kubenswrapper[4871]: E1126 05:57:36.115502 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.115542 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.115776 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.116678 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.118595 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.118723 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.118940 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.120290 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.136422 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn"] Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.294092 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-99lcn\" (UID: \"8fa0c629-09c7-43d9-964c-37320a475595\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.294376 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zs2cm\" (UniqueName: \"kubernetes.io/projected/8fa0c629-09c7-43d9-964c-37320a475595-kube-api-access-zs2cm\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-99lcn\" (UID: \"8fa0c629-09c7-43d9-964c-37320a475595\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.294424 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-99lcn\" (UID: \"8fa0c629-09c7-43d9-964c-37320a475595\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.416888 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zs2cm\" (UniqueName: \"kubernetes.io/projected/8fa0c629-09c7-43d9-964c-37320a475595-kube-api-access-zs2cm\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-99lcn\" (UID: \"8fa0c629-09c7-43d9-964c-37320a475595\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.416956 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-99lcn\" (UID: \"8fa0c629-09c7-43d9-964c-37320a475595\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.417098 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-99lcn\" (UID: 
\"8fa0c629-09c7-43d9-964c-37320a475595\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.421914 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-99lcn\" (UID: \"8fa0c629-09c7-43d9-964c-37320a475595\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.421927 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-99lcn\" (UID: \"8fa0c629-09c7-43d9-964c-37320a475595\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.444489 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zs2cm\" (UniqueName: \"kubernetes.io/projected/8fa0c629-09c7-43d9-964c-37320a475595-kube-api-access-zs2cm\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-99lcn\" (UID: \"8fa0c629-09c7-43d9-964c-37320a475595\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:57:36 crc kubenswrapper[4871]: I1126 05:57:36.740218 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:57:37 crc kubenswrapper[4871]: I1126 05:57:37.296111 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn"] Nov 26 05:57:38 crc kubenswrapper[4871]: I1126 05:57:38.026138 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" event={"ID":"8fa0c629-09c7-43d9-964c-37320a475595","Type":"ContainerStarted","Data":"7517e1b96335de4e56ea6ce79c5596623069b20575f3cf00e1000b0e59f0c373"} Nov 26 05:57:39 crc kubenswrapper[4871]: I1126 05:57:39.040359 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" event={"ID":"8fa0c629-09c7-43d9-964c-37320a475595","Type":"ContainerStarted","Data":"27e1f5cf41a322b236a049dc0bc28e48322baa10db0178ebc1713972dc29d55d"} Nov 26 05:57:39 crc kubenswrapper[4871]: I1126 05:57:39.070851 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" podStartSLOduration=2.5505476160000002 podStartE2EDuration="3.070814003s" podCreationTimestamp="2025-11-26 05:57:36 +0000 UTC" firstStartedPulling="2025-11-26 05:57:37.299859315 +0000 UTC m=+1915.482910891" lastFinishedPulling="2025-11-26 05:57:37.820125652 +0000 UTC m=+1916.003177278" observedRunningTime="2025-11-26 05:57:39.065650524 +0000 UTC m=+1917.248702120" watchObservedRunningTime="2025-11-26 05:57:39.070814003 +0000 UTC m=+1917.253865609" Nov 26 05:58:19 crc kubenswrapper[4871]: I1126 05:58:19.512472 4871 generic.go:334] "Generic (PLEG): container finished" podID="8fa0c629-09c7-43d9-964c-37320a475595" containerID="27e1f5cf41a322b236a049dc0bc28e48322baa10db0178ebc1713972dc29d55d" exitCode=0 Nov 26 05:58:19 crc kubenswrapper[4871]: I1126 05:58:19.512556 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" event={"ID":"8fa0c629-09c7-43d9-964c-37320a475595","Type":"ContainerDied","Data":"27e1f5cf41a322b236a049dc0bc28e48322baa10db0178ebc1713972dc29d55d"} Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.044930 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.069633 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-ssh-key\") pod \"8fa0c629-09c7-43d9-964c-37320a475595\" (UID: \"8fa0c629-09c7-43d9-964c-37320a475595\") " Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.069697 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-inventory\") pod \"8fa0c629-09c7-43d9-964c-37320a475595\" (UID: \"8fa0c629-09c7-43d9-964c-37320a475595\") " Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.069923 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zs2cm\" (UniqueName: \"kubernetes.io/projected/8fa0c629-09c7-43d9-964c-37320a475595-kube-api-access-zs2cm\") pod \"8fa0c629-09c7-43d9-964c-37320a475595\" (UID: \"8fa0c629-09c7-43d9-964c-37320a475595\") " Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.078621 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fa0c629-09c7-43d9-964c-37320a475595-kube-api-access-zs2cm" (OuterVolumeSpecName: "kube-api-access-zs2cm") pod "8fa0c629-09c7-43d9-964c-37320a475595" (UID: "8fa0c629-09c7-43d9-964c-37320a475595"). InnerVolumeSpecName "kube-api-access-zs2cm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.103861 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8fa0c629-09c7-43d9-964c-37320a475595" (UID: "8fa0c629-09c7-43d9-964c-37320a475595"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.107345 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-inventory" (OuterVolumeSpecName: "inventory") pod "8fa0c629-09c7-43d9-964c-37320a475595" (UID: "8fa0c629-09c7-43d9-964c-37320a475595"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.173132 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.173182 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8fa0c629-09c7-43d9-964c-37320a475595-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.173202 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zs2cm\" (UniqueName: \"kubernetes.io/projected/8fa0c629-09c7-43d9-964c-37320a475595-kube-api-access-zs2cm\") on node \"crc\" DevicePath \"\"" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.533507 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" event={"ID":"8fa0c629-09c7-43d9-964c-37320a475595","Type":"ContainerDied","Data":"7517e1b96335de4e56ea6ce79c5596623069b20575f3cf00e1000b0e59f0c373"} Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.533564 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7517e1b96335de4e56ea6ce79c5596623069b20575f3cf00e1000b0e59f0c373" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.533614 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-99lcn" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.633646 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt"] Nov 26 05:58:21 crc kubenswrapper[4871]: E1126 05:58:21.634052 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fa0c629-09c7-43d9-964c-37320a475595" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.634070 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fa0c629-09c7-43d9-964c-37320a475595" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.634285 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fa0c629-09c7-43d9-964c-37320a475595" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.635013 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.637654 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.638322 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.641284 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.641579 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.661575 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt"] Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.684157 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ps9bp\" (UniqueName: \"kubernetes.io/projected/46746b5b-e35a-452a-bdad-12b497a8c3b0-kube-api-access-ps9bp\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt\" (UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.684731 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt\" (UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.684881 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt\" (UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.786636 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt\" (UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.786884 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ps9bp\" (UniqueName: \"kubernetes.io/projected/46746b5b-e35a-452a-bdad-12b497a8c3b0-kube-api-access-ps9bp\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt\" (UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.786936 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt\" 
(UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.793411 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt\" (UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.793489 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt\" (UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.812626 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ps9bp\" (UniqueName: \"kubernetes.io/projected/46746b5b-e35a-452a-bdad-12b497a8c3b0-kube-api-access-ps9bp\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt\" (UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:58:21 crc kubenswrapper[4871]: I1126 05:58:21.951073 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:58:22 crc kubenswrapper[4871]: I1126 05:58:22.502285 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt"] Nov 26 05:58:22 crc kubenswrapper[4871]: W1126 05:58:22.505959 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod46746b5b_e35a_452a_bdad_12b497a8c3b0.slice/crio-cc152c2c23387cfdc9c730c4f9e5cb8574bd656f666e0c2a495e9ae3fddb3b07 WatchSource:0}: Error finding container cc152c2c23387cfdc9c730c4f9e5cb8574bd656f666e0c2a495e9ae3fddb3b07: Status 404 returned error can't find the container with id cc152c2c23387cfdc9c730c4f9e5cb8574bd656f666e0c2a495e9ae3fddb3b07 Nov 26 05:58:22 crc kubenswrapper[4871]: I1126 05:58:22.546681 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" event={"ID":"46746b5b-e35a-452a-bdad-12b497a8c3b0","Type":"ContainerStarted","Data":"cc152c2c23387cfdc9c730c4f9e5cb8574bd656f666e0c2a495e9ae3fddb3b07"} Nov 26 05:58:23 crc kubenswrapper[4871]: I1126 05:58:23.559439 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" event={"ID":"46746b5b-e35a-452a-bdad-12b497a8c3b0","Type":"ContainerStarted","Data":"3d45f12e23b4e5c6d9a8d33bcf6c6bcaed38272dab71cc1a288e91def8acbc9a"} Nov 26 05:58:23 crc kubenswrapper[4871]: I1126 05:58:23.589450 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" podStartSLOduration=2.088475186 podStartE2EDuration="2.589428182s" podCreationTimestamp="2025-11-26 05:58:21 +0000 UTC" firstStartedPulling="2025-11-26 05:58:22.509059405 +0000 UTC m=+1960.692111011" lastFinishedPulling="2025-11-26 05:58:23.010012431 +0000 UTC m=+1961.193064007" observedRunningTime="2025-11-26 
05:58:23.576213473 +0000 UTC m=+1961.759265089" watchObservedRunningTime="2025-11-26 05:58:23.589428182 +0000 UTC m=+1961.772479778" Nov 26 05:58:53 crc kubenswrapper[4871]: I1126 05:58:53.615085 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:58:53 crc kubenswrapper[4871]: I1126 05:58:53.615868 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:59:19 crc kubenswrapper[4871]: I1126 05:59:19.192619 4871 generic.go:334] "Generic (PLEG): container finished" podID="46746b5b-e35a-452a-bdad-12b497a8c3b0" containerID="3d45f12e23b4e5c6d9a8d33bcf6c6bcaed38272dab71cc1a288e91def8acbc9a" exitCode=0 Nov 26 05:59:19 crc kubenswrapper[4871]: I1126 05:59:19.192704 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" event={"ID":"46746b5b-e35a-452a-bdad-12b497a8c3b0","Type":"ContainerDied","Data":"3d45f12e23b4e5c6d9a8d33bcf6c6bcaed38272dab71cc1a288e91def8acbc9a"} Nov 26 05:59:20 crc kubenswrapper[4871]: I1126 05:59:20.702821 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:59:20 crc kubenswrapper[4871]: I1126 05:59:20.839043 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-ssh-key\") pod \"46746b5b-e35a-452a-bdad-12b497a8c3b0\" (UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " Nov 26 05:59:20 crc kubenswrapper[4871]: I1126 05:59:20.839364 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-inventory\") pod \"46746b5b-e35a-452a-bdad-12b497a8c3b0\" (UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " Nov 26 05:59:20 crc kubenswrapper[4871]: I1126 05:59:20.839451 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ps9bp\" (UniqueName: \"kubernetes.io/projected/46746b5b-e35a-452a-bdad-12b497a8c3b0-kube-api-access-ps9bp\") pod \"46746b5b-e35a-452a-bdad-12b497a8c3b0\" (UID: \"46746b5b-e35a-452a-bdad-12b497a8c3b0\") " Nov 26 05:59:20 crc kubenswrapper[4871]: I1126 05:59:20.844642 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46746b5b-e35a-452a-bdad-12b497a8c3b0-kube-api-access-ps9bp" (OuterVolumeSpecName: "kube-api-access-ps9bp") pod "46746b5b-e35a-452a-bdad-12b497a8c3b0" (UID: "46746b5b-e35a-452a-bdad-12b497a8c3b0"). InnerVolumeSpecName "kube-api-access-ps9bp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:59:20 crc kubenswrapper[4871]: I1126 05:59:20.867359 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-inventory" (OuterVolumeSpecName: "inventory") pod "46746b5b-e35a-452a-bdad-12b497a8c3b0" (UID: "46746b5b-e35a-452a-bdad-12b497a8c3b0"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:59:20 crc kubenswrapper[4871]: I1126 05:59:20.872745 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "46746b5b-e35a-452a-bdad-12b497a8c3b0" (UID: "46746b5b-e35a-452a-bdad-12b497a8c3b0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:59:20 crc kubenswrapper[4871]: I1126 05:59:20.942848 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:20 crc kubenswrapper[4871]: I1126 05:59:20.942909 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ps9bp\" (UniqueName: \"kubernetes.io/projected/46746b5b-e35a-452a-bdad-12b497a8c3b0-kube-api-access-ps9bp\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:20 crc kubenswrapper[4871]: I1126 05:59:20.942934 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/46746b5b-e35a-452a-bdad-12b497a8c3b0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.226851 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" event={"ID":"46746b5b-e35a-452a-bdad-12b497a8c3b0","Type":"ContainerDied","Data":"cc152c2c23387cfdc9c730c4f9e5cb8574bd656f666e0c2a495e9ae3fddb3b07"} Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.226917 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc152c2c23387cfdc9c730c4f9e5cb8574bd656f666e0c2a495e9ae3fddb3b07" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.226934 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.357338 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-d5dz5"] Nov 26 05:59:21 crc kubenswrapper[4871]: E1126 05:59:21.357860 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46746b5b-e35a-452a-bdad-12b497a8c3b0" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.357882 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="46746b5b-e35a-452a-bdad-12b497a8c3b0" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.358115 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="46746b5b-e35a-452a-bdad-12b497a8c3b0" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.360739 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.371086 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.371400 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.371696 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.371950 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.373296 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-d5dz5"] Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.455207 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-d5dz5\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.455403 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpb8l\" (UniqueName: \"kubernetes.io/projected/0a3a838b-5101-4706-a5d9-50fc5797ba72-kube-api-access-lpb8l\") pod \"ssh-known-hosts-edpm-deployment-d5dz5\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.455629 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-d5dz5\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.557732 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-d5dz5\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.557850 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-d5dz5\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.557910 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpb8l\" (UniqueName: \"kubernetes.io/projected/0a3a838b-5101-4706-a5d9-50fc5797ba72-kube-api-access-lpb8l\") pod \"ssh-known-hosts-edpm-deployment-d5dz5\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:21 crc 
kubenswrapper[4871]: I1126 05:59:21.562621 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-d5dz5\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.574004 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-d5dz5\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.581644 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpb8l\" (UniqueName: \"kubernetes.io/projected/0a3a838b-5101-4706-a5d9-50fc5797ba72-kube-api-access-lpb8l\") pod \"ssh-known-hosts-edpm-deployment-d5dz5\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:21 crc kubenswrapper[4871]: I1126 05:59:21.697311 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:22 crc kubenswrapper[4871]: I1126 05:59:22.290778 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-d5dz5"] Nov 26 05:59:22 crc kubenswrapper[4871]: W1126 05:59:22.294514 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a3a838b_5101_4706_a5d9_50fc5797ba72.slice/crio-1dd7681f10036fd2ad7ab6ecad16166b04ae2993f37cbe99a7ab60beac5b21a3 WatchSource:0}: Error finding container 1dd7681f10036fd2ad7ab6ecad16166b04ae2993f37cbe99a7ab60beac5b21a3: Status 404 returned error can't find the container with id 1dd7681f10036fd2ad7ab6ecad16166b04ae2993f37cbe99a7ab60beac5b21a3 Nov 26 05:59:23 crc kubenswrapper[4871]: I1126 05:59:23.255397 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" event={"ID":"0a3a838b-5101-4706-a5d9-50fc5797ba72","Type":"ContainerStarted","Data":"8d94436a53bc966f825ba6db74f8f2d18e28d084c346b3ffaea8397685a891f3"} Nov 26 05:59:23 crc kubenswrapper[4871]: I1126 05:59:23.255925 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" event={"ID":"0a3a838b-5101-4706-a5d9-50fc5797ba72","Type":"ContainerStarted","Data":"1dd7681f10036fd2ad7ab6ecad16166b04ae2993f37cbe99a7ab60beac5b21a3"} Nov 26 05:59:23 crc kubenswrapper[4871]: I1126 05:59:23.302482 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" podStartSLOduration=1.822274108 podStartE2EDuration="2.302448809s" podCreationTimestamp="2025-11-26 05:59:21 +0000 UTC" firstStartedPulling="2025-11-26 05:59:22.298865373 +0000 UTC m=+2020.481916999" lastFinishedPulling="2025-11-26 05:59:22.779040074 +0000 UTC m=+2020.962091700" observedRunningTime="2025-11-26 05:59:23.277329085 +0000 UTC m=+2021.460380721" watchObservedRunningTime="2025-11-26 05:59:23.302448809 +0000 UTC m=+2021.485500445" Nov 26 05:59:23 crc kubenswrapper[4871]: I1126 05:59:23.615365 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:59:23 crc kubenswrapper[4871]: I1126 05:59:23.615441 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:59:30 crc kubenswrapper[4871]: I1126 05:59:30.337225 4871 generic.go:334] "Generic (PLEG): container finished" podID="0a3a838b-5101-4706-a5d9-50fc5797ba72" containerID="8d94436a53bc966f825ba6db74f8f2d18e28d084c346b3ffaea8397685a891f3" exitCode=0 Nov 26 05:59:30 crc kubenswrapper[4871]: I1126 05:59:30.337283 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" event={"ID":"0a3a838b-5101-4706-a5d9-50fc5797ba72","Type":"ContainerDied","Data":"8d94436a53bc966f825ba6db74f8f2d18e28d084c346b3ffaea8397685a891f3"} Nov 26 05:59:31 crc kubenswrapper[4871]: I1126 05:59:31.919971 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:31 crc kubenswrapper[4871]: I1126 05:59:31.989030 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-ssh-key-openstack-edpm-ipam\") pod \"0a3a838b-5101-4706-a5d9-50fc5797ba72\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " Nov 26 05:59:31 crc kubenswrapper[4871]: I1126 05:59:31.989109 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-inventory-0\") pod \"0a3a838b-5101-4706-a5d9-50fc5797ba72\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " Nov 26 05:59:31 crc kubenswrapper[4871]: I1126 05:59:31.989199 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpb8l\" (UniqueName: \"kubernetes.io/projected/0a3a838b-5101-4706-a5d9-50fc5797ba72-kube-api-access-lpb8l\") pod \"0a3a838b-5101-4706-a5d9-50fc5797ba72\" (UID: \"0a3a838b-5101-4706-a5d9-50fc5797ba72\") " Nov 26 05:59:31 crc kubenswrapper[4871]: I1126 05:59:31.994313 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a3a838b-5101-4706-a5d9-50fc5797ba72-kube-api-access-lpb8l" (OuterVolumeSpecName: "kube-api-access-lpb8l") pod "0a3a838b-5101-4706-a5d9-50fc5797ba72" (UID: "0a3a838b-5101-4706-a5d9-50fc5797ba72"). InnerVolumeSpecName "kube-api-access-lpb8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.034406 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "0a3a838b-5101-4706-a5d9-50fc5797ba72" (UID: "0a3a838b-5101-4706-a5d9-50fc5797ba72"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.037951 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "0a3a838b-5101-4706-a5d9-50fc5797ba72" (UID: "0a3a838b-5101-4706-a5d9-50fc5797ba72"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.091911 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpb8l\" (UniqueName: \"kubernetes.io/projected/0a3a838b-5101-4706-a5d9-50fc5797ba72-kube-api-access-lpb8l\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.091952 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.091962 4871 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/0a3a838b-5101-4706-a5d9-50fc5797ba72-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.365631 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" event={"ID":"0a3a838b-5101-4706-a5d9-50fc5797ba72","Type":"ContainerDied","Data":"1dd7681f10036fd2ad7ab6ecad16166b04ae2993f37cbe99a7ab60beac5b21a3"} Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.365701 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1dd7681f10036fd2ad7ab6ecad16166b04ae2993f37cbe99a7ab60beac5b21a3" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.365656 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-d5dz5" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.464593 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv"] Nov 26 05:59:32 crc kubenswrapper[4871]: E1126 05:59:32.465245 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a3a838b-5101-4706-a5d9-50fc5797ba72" containerName="ssh-known-hosts-edpm-deployment" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.465267 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a3a838b-5101-4706-a5d9-50fc5797ba72" containerName="ssh-known-hosts-edpm-deployment" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.465701 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a3a838b-5101-4706-a5d9-50fc5797ba72" containerName="ssh-known-hosts-edpm-deployment" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.466820 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.470908 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.471427 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.471722 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.475581 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.479119 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv"] Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.499991 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-cglzv\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.500388 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-cglzv\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.500541 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xms7j\" (UniqueName: \"kubernetes.io/projected/fca1e368-592f-4da5-b8f8-12bb29eca743-kube-api-access-xms7j\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-cglzv\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.602239 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-cglzv\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.602321 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xms7j\" (UniqueName: \"kubernetes.io/projected/fca1e368-592f-4da5-b8f8-12bb29eca743-kube-api-access-xms7j\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-cglzv\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.602470 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-cglzv\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.606732 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-cglzv\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.611263 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-cglzv\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.620697 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xms7j\" (UniqueName: \"kubernetes.io/projected/fca1e368-592f-4da5-b8f8-12bb29eca743-kube-api-access-xms7j\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-cglzv\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:32 crc kubenswrapper[4871]: I1126 05:59:32.803361 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:33 crc kubenswrapper[4871]: I1126 05:59:33.420251 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv"] Nov 26 05:59:34 crc kubenswrapper[4871]: I1126 05:59:34.390453 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" event={"ID":"fca1e368-592f-4da5-b8f8-12bb29eca743","Type":"ContainerStarted","Data":"1cdd1fc7086c77be12d72a6b99a38b000070ca270a157ef308593e165df23dd0"} Nov 26 05:59:34 crc kubenswrapper[4871]: I1126 05:59:34.390787 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" event={"ID":"fca1e368-592f-4da5-b8f8-12bb29eca743","Type":"ContainerStarted","Data":"72ed333eff328de81d9d52171e572113bae642bc4f81598f53ff31dc3a2f0431"} Nov 26 05:59:34 crc kubenswrapper[4871]: I1126 05:59:34.413404 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" podStartSLOduration=1.996637766 podStartE2EDuration="2.413377372s" podCreationTimestamp="2025-11-26 05:59:32 +0000 UTC" firstStartedPulling="2025-11-26 05:59:33.428847918 +0000 UTC m=+2031.611899504" lastFinishedPulling="2025-11-26 05:59:33.845587534 +0000 UTC m=+2032.028639110" observedRunningTime="2025-11-26 05:59:34.405185059 +0000 UTC m=+2032.588236715" watchObservedRunningTime="2025-11-26 05:59:34.413377372 +0000 UTC m=+2032.596428998" Nov 26 05:59:42 crc kubenswrapper[4871]: I1126 05:59:42.486221 4871 generic.go:334] "Generic (PLEG): container finished" podID="fca1e368-592f-4da5-b8f8-12bb29eca743" containerID="1cdd1fc7086c77be12d72a6b99a38b000070ca270a157ef308593e165df23dd0" exitCode=0 Nov 26 05:59:42 crc kubenswrapper[4871]: I1126 05:59:42.486329 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" 
event={"ID":"fca1e368-592f-4da5-b8f8-12bb29eca743","Type":"ContainerDied","Data":"1cdd1fc7086c77be12d72a6b99a38b000070ca270a157ef308593e165df23dd0"} Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.026331 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.058385 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xms7j\" (UniqueName: \"kubernetes.io/projected/fca1e368-592f-4da5-b8f8-12bb29eca743-kube-api-access-xms7j\") pod \"fca1e368-592f-4da5-b8f8-12bb29eca743\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.058577 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-inventory\") pod \"fca1e368-592f-4da5-b8f8-12bb29eca743\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.058742 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-ssh-key\") pod \"fca1e368-592f-4da5-b8f8-12bb29eca743\" (UID: \"fca1e368-592f-4da5-b8f8-12bb29eca743\") " Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.063779 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fca1e368-592f-4da5-b8f8-12bb29eca743-kube-api-access-xms7j" (OuterVolumeSpecName: "kube-api-access-xms7j") pod "fca1e368-592f-4da5-b8f8-12bb29eca743" (UID: "fca1e368-592f-4da5-b8f8-12bb29eca743"). InnerVolumeSpecName "kube-api-access-xms7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.088286 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fca1e368-592f-4da5-b8f8-12bb29eca743" (UID: "fca1e368-592f-4da5-b8f8-12bb29eca743"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.108471 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-inventory" (OuterVolumeSpecName: "inventory") pod "fca1e368-592f-4da5-b8f8-12bb29eca743" (UID: "fca1e368-592f-4da5-b8f8-12bb29eca743"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.161883 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xms7j\" (UniqueName: \"kubernetes.io/projected/fca1e368-592f-4da5-b8f8-12bb29eca743-kube-api-access-xms7j\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.161945 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.161965 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fca1e368-592f-4da5-b8f8-12bb29eca743-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.517591 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.525317 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-cglzv" event={"ID":"fca1e368-592f-4da5-b8f8-12bb29eca743","Type":"ContainerDied","Data":"72ed333eff328de81d9d52171e572113bae642bc4f81598f53ff31dc3a2f0431"} Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.525380 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72ed333eff328de81d9d52171e572113bae642bc4f81598f53ff31dc3a2f0431" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.614579 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m"] Nov 26 05:59:44 crc kubenswrapper[4871]: E1126 05:59:44.615014 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fca1e368-592f-4da5-b8f8-12bb29eca743" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.615036 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="fca1e368-592f-4da5-b8f8-12bb29eca743" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.615303 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="fca1e368-592f-4da5-b8f8-12bb29eca743" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.616032 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.618326 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.618456 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.621439 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.623988 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.639454 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m"] Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.672134 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m\" (UID: \"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.672220 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m\" (UID: \"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.672425 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vft2\" (UniqueName: \"kubernetes.io/projected/d39ab741-a044-4ac6-9f2a-0949948cafdb-kube-api-access-7vft2\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m\" (UID: \"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.774067 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vft2\" (UniqueName: \"kubernetes.io/projected/d39ab741-a044-4ac6-9f2a-0949948cafdb-kube-api-access-7vft2\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m\" (UID: \"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.774219 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m\" (UID: \"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.774300 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m\" (UID: 
\"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.779546 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m\" (UID: \"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.779730 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m\" (UID: \"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.796842 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vft2\" (UniqueName: \"kubernetes.io/projected/d39ab741-a044-4ac6-9f2a-0949948cafdb-kube-api-access-7vft2\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m\" (UID: \"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:44 crc kubenswrapper[4871]: I1126 05:59:44.944414 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:45 crc kubenswrapper[4871]: I1126 05:59:45.479387 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m"] Nov 26 05:59:45 crc kubenswrapper[4871]: I1126 05:59:45.530313 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" event={"ID":"d39ab741-a044-4ac6-9f2a-0949948cafdb","Type":"ContainerStarted","Data":"e276cdc8412880b2e6257dd97119ab0c961b4eb49680f2e940ddf5dc6b61e94a"} Nov 26 05:59:46 crc kubenswrapper[4871]: I1126 05:59:46.546845 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" event={"ID":"d39ab741-a044-4ac6-9f2a-0949948cafdb","Type":"ContainerStarted","Data":"c2f4b171f31f92aec779b8ffb761e05169e4bee10df6acfb65f29bcdc267a995"} Nov 26 05:59:46 crc kubenswrapper[4871]: I1126 05:59:46.575637 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" podStartSLOduration=2.099800614 podStartE2EDuration="2.575615397s" podCreationTimestamp="2025-11-26 05:59:44 +0000 UTC" firstStartedPulling="2025-11-26 05:59:45.491302545 +0000 UTC m=+2043.674354121" lastFinishedPulling="2025-11-26 05:59:45.967117278 +0000 UTC m=+2044.150168904" observedRunningTime="2025-11-26 05:59:46.569359862 +0000 UTC m=+2044.752411448" watchObservedRunningTime="2025-11-26 05:59:46.575615397 +0000 UTC m=+2044.758667003" Nov 26 05:59:53 crc kubenswrapper[4871]: I1126 05:59:53.614760 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 05:59:53 crc kubenswrapper[4871]: I1126 05:59:53.615373 4871 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 05:59:53 crc kubenswrapper[4871]: I1126 05:59:53.615454 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 05:59:53 crc kubenswrapper[4871]: I1126 05:59:53.616305 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"690714808ddb6f01775f9048b33eb3edf7f436bcc65bc1eb71c8be346f112ef0"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 05:59:53 crc kubenswrapper[4871]: I1126 05:59:53.616420 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://690714808ddb6f01775f9048b33eb3edf7f436bcc65bc1eb71c8be346f112ef0" gracePeriod=600 Nov 26 05:59:54 crc kubenswrapper[4871]: I1126 05:59:54.666662 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="690714808ddb6f01775f9048b33eb3edf7f436bcc65bc1eb71c8be346f112ef0" exitCode=0 Nov 26 05:59:54 crc kubenswrapper[4871]: I1126 05:59:54.666763 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"690714808ddb6f01775f9048b33eb3edf7f436bcc65bc1eb71c8be346f112ef0"} Nov 26 05:59:54 crc kubenswrapper[4871]: I1126 05:59:54.667714 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"} Nov 26 05:59:54 crc kubenswrapper[4871]: I1126 05:59:54.667754 4871 scope.go:117] "RemoveContainer" containerID="2a504ea257de40b6a442423001f6ea59df155c720214c722b284a8033abea9ff" Nov 26 05:59:56 crc kubenswrapper[4871]: I1126 05:59:56.697189 4871 generic.go:334] "Generic (PLEG): container finished" podID="d39ab741-a044-4ac6-9f2a-0949948cafdb" containerID="c2f4b171f31f92aec779b8ffb761e05169e4bee10df6acfb65f29bcdc267a995" exitCode=0 Nov 26 05:59:56 crc kubenswrapper[4871]: I1126 05:59:56.697278 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" event={"ID":"d39ab741-a044-4ac6-9f2a-0949948cafdb","Type":"ContainerDied","Data":"c2f4b171f31f92aec779b8ffb761e05169e4bee10df6acfb65f29bcdc267a995"} Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.164850 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.298817 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-inventory\") pod \"d39ab741-a044-4ac6-9f2a-0949948cafdb\" (UID: \"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.299047 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-ssh-key\") pod \"d39ab741-a044-4ac6-9f2a-0949948cafdb\" (UID: \"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.299265 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vft2\" (UniqueName: \"kubernetes.io/projected/d39ab741-a044-4ac6-9f2a-0949948cafdb-kube-api-access-7vft2\") pod \"d39ab741-a044-4ac6-9f2a-0949948cafdb\" (UID: \"d39ab741-a044-4ac6-9f2a-0949948cafdb\") " Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.303946 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d39ab741-a044-4ac6-9f2a-0949948cafdb-kube-api-access-7vft2" (OuterVolumeSpecName: "kube-api-access-7vft2") pod "d39ab741-a044-4ac6-9f2a-0949948cafdb" (UID: "d39ab741-a044-4ac6-9f2a-0949948cafdb"). InnerVolumeSpecName "kube-api-access-7vft2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.350256 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-inventory" (OuterVolumeSpecName: "inventory") pod "d39ab741-a044-4ac6-9f2a-0949948cafdb" (UID: "d39ab741-a044-4ac6-9f2a-0949948cafdb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.352573 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d39ab741-a044-4ac6-9f2a-0949948cafdb" (UID: "d39ab741-a044-4ac6-9f2a-0949948cafdb"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.403387 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.403632 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d39ab741-a044-4ac6-9f2a-0949948cafdb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.403684 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vft2\" (UniqueName: \"kubernetes.io/projected/d39ab741-a044-4ac6-9f2a-0949948cafdb-kube-api-access-7vft2\") on node \"crc\" DevicePath \"\"" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.735350 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" event={"ID":"d39ab741-a044-4ac6-9f2a-0949948cafdb","Type":"ContainerDied","Data":"e276cdc8412880b2e6257dd97119ab0c961b4eb49680f2e940ddf5dc6b61e94a"} Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.735424 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e276cdc8412880b2e6257dd97119ab0c961b4eb49680f2e940ddf5dc6b61e94a" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.735655 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.842708 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj"] Nov 26 05:59:58 crc kubenswrapper[4871]: E1126 05:59:58.843390 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d39ab741-a044-4ac6-9f2a-0949948cafdb" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.843412 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="d39ab741-a044-4ac6-9f2a-0949948cafdb" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.843738 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="d39ab741-a044-4ac6-9f2a-0949948cafdb" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.844568 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.847510 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.848338 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.848603 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.848833 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.849171 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.849398 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.849832 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.851845 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 05:59:58 crc kubenswrapper[4871]: I1126 05:59:58.861644 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj"] Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.019417 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.019668 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.019725 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.019771 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.019919 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.020098 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.020323 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.020458 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.020593 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.020722 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvtdw\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-kube-api-access-fvtdw\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.020861 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: 
\"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.021040 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.021140 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.021258 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.122883 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.122970 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123015 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123061 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123086 4871 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-fvtdw\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-kube-api-access-fvtdw\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123127 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123169 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123198 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123232 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123269 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123305 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123326 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-ovn-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123355 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.123401 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.131158 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.131802 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.132162 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.133475 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.134947 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.135679 4871 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.135776 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.136759 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.137395 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.138842 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.139696 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.145389 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.146802 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: 
\"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.159020 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvtdw\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-kube-api-access-fvtdw\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.176504 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.613381 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj"] Nov 26 05:59:59 crc kubenswrapper[4871]: I1126 05:59:59.745859 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" event={"ID":"48acdf72-822b-456b-b545-bd1499db855d","Type":"ContainerStarted","Data":"cd30a429a670d2caddace583bf7a93aaa2a299dc3b45c9fc01de247b44e0fead"} Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.193991 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc"] Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.195769 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.198215 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.200287 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.202936 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc"] Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.357770 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b032092f-123d-4532-8193-05c7afe3011d-config-volume\") pod \"collect-profiles-29402280-gnfvc\" (UID: \"b032092f-123d-4532-8193-05c7afe3011d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.357854 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9n9t\" (UniqueName: \"kubernetes.io/projected/b032092f-123d-4532-8193-05c7afe3011d-kube-api-access-s9n9t\") pod \"collect-profiles-29402280-gnfvc\" (UID: \"b032092f-123d-4532-8193-05c7afe3011d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.358168 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b032092f-123d-4532-8193-05c7afe3011d-secret-volume\") pod \"collect-profiles-29402280-gnfvc\" (UID: 
\"b032092f-123d-4532-8193-05c7afe3011d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.460564 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b032092f-123d-4532-8193-05c7afe3011d-config-volume\") pod \"collect-profiles-29402280-gnfvc\" (UID: \"b032092f-123d-4532-8193-05c7afe3011d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.460645 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9n9t\" (UniqueName: \"kubernetes.io/projected/b032092f-123d-4532-8193-05c7afe3011d-kube-api-access-s9n9t\") pod \"collect-profiles-29402280-gnfvc\" (UID: \"b032092f-123d-4532-8193-05c7afe3011d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.460719 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b032092f-123d-4532-8193-05c7afe3011d-secret-volume\") pod \"collect-profiles-29402280-gnfvc\" (UID: \"b032092f-123d-4532-8193-05c7afe3011d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.462021 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b032092f-123d-4532-8193-05c7afe3011d-config-volume\") pod \"collect-profiles-29402280-gnfvc\" (UID: \"b032092f-123d-4532-8193-05c7afe3011d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.465520 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b032092f-123d-4532-8193-05c7afe3011d-secret-volume\") pod \"collect-profiles-29402280-gnfvc\" (UID: \"b032092f-123d-4532-8193-05c7afe3011d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.480278 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9n9t\" (UniqueName: \"kubernetes.io/projected/b032092f-123d-4532-8193-05c7afe3011d-kube-api-access-s9n9t\") pod \"collect-profiles-29402280-gnfvc\" (UID: \"b032092f-123d-4532-8193-05c7afe3011d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.635397 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.758628 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" event={"ID":"48acdf72-822b-456b-b545-bd1499db855d","Type":"ContainerStarted","Data":"46cb2c0cf488fd2476f4db6d09471fcbe9a163c43a48dc08ad552a22abf68422"} Nov 26 06:00:00 crc kubenswrapper[4871]: I1126 06:00:00.784154 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" podStartSLOduration=2.337152282 podStartE2EDuration="2.784132739s" podCreationTimestamp="2025-11-26 05:59:58 +0000 UTC" firstStartedPulling="2025-11-26 05:59:59.617506671 +0000 UTC m=+2057.800558247" lastFinishedPulling="2025-11-26 06:00:00.064487078 +0000 UTC m=+2058.247538704" observedRunningTime="2025-11-26 06:00:00.776077769 +0000 UTC m=+2058.959129375" watchObservedRunningTime="2025-11-26 06:00:00.784132739 +0000 UTC m=+2058.967184325" Nov 26 06:00:01 crc kubenswrapper[4871]: W1126 06:00:01.138634 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb032092f_123d_4532_8193_05c7afe3011d.slice/crio-f7e6e2b058897db0d0065c0dbb4abbc300debe922d4ed6aea9a7893db6379074 WatchSource:0}: Error finding container f7e6e2b058897db0d0065c0dbb4abbc300debe922d4ed6aea9a7893db6379074: Status 404 returned error can't find the container with id f7e6e2b058897db0d0065c0dbb4abbc300debe922d4ed6aea9a7893db6379074 Nov 26 06:00:01 crc kubenswrapper[4871]: I1126 06:00:01.146300 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc"] Nov 26 06:00:01 crc kubenswrapper[4871]: I1126 06:00:01.785840 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" event={"ID":"b032092f-123d-4532-8193-05c7afe3011d","Type":"ContainerStarted","Data":"ea430ac90e595af09c8ffb9a0c1286ce1636bce501c829a7b68af6accad36bd1"} Nov 26 06:00:01 crc kubenswrapper[4871]: I1126 06:00:01.785889 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" event={"ID":"b032092f-123d-4532-8193-05c7afe3011d","Type":"ContainerStarted","Data":"f7e6e2b058897db0d0065c0dbb4abbc300debe922d4ed6aea9a7893db6379074"} Nov 26 06:00:01 crc kubenswrapper[4871]: I1126 06:00:01.805748 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" podStartSLOduration=1.805729114 podStartE2EDuration="1.805729114s" podCreationTimestamp="2025-11-26 06:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:00:01.802285019 +0000 UTC m=+2059.985336695" watchObservedRunningTime="2025-11-26 06:00:01.805729114 +0000 UTC m=+2059.988780700" Nov 26 06:00:02 crc kubenswrapper[4871]: I1126 06:00:02.802078 4871 generic.go:334] "Generic (PLEG): container finished" podID="b032092f-123d-4532-8193-05c7afe3011d" containerID="ea430ac90e595af09c8ffb9a0c1286ce1636bce501c829a7b68af6accad36bd1" exitCode=0 Nov 26 06:00:02 crc kubenswrapper[4871]: I1126 06:00:02.802244 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" event={"ID":"b032092f-123d-4532-8193-05c7afe3011d","Type":"ContainerDied","Data":"ea430ac90e595af09c8ffb9a0c1286ce1636bce501c829a7b68af6accad36bd1"} Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.138158 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.245098 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b032092f-123d-4532-8193-05c7afe3011d-secret-volume\") pod \"b032092f-123d-4532-8193-05c7afe3011d\" (UID: \"b032092f-123d-4532-8193-05c7afe3011d\") " Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.245204 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b032092f-123d-4532-8193-05c7afe3011d-config-volume\") pod \"b032092f-123d-4532-8193-05c7afe3011d\" (UID: \"b032092f-123d-4532-8193-05c7afe3011d\") " Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.245599 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9n9t\" (UniqueName: \"kubernetes.io/projected/b032092f-123d-4532-8193-05c7afe3011d-kube-api-access-s9n9t\") pod \"b032092f-123d-4532-8193-05c7afe3011d\" (UID: \"b032092f-123d-4532-8193-05c7afe3011d\") " Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.245878 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b032092f-123d-4532-8193-05c7afe3011d-config-volume" (OuterVolumeSpecName: "config-volume") pod "b032092f-123d-4532-8193-05c7afe3011d" (UID: "b032092f-123d-4532-8193-05c7afe3011d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.246389 4871 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b032092f-123d-4532-8193-05c7afe3011d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.252037 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b032092f-123d-4532-8193-05c7afe3011d-kube-api-access-s9n9t" (OuterVolumeSpecName: "kube-api-access-s9n9t") pod "b032092f-123d-4532-8193-05c7afe3011d" (UID: "b032092f-123d-4532-8193-05c7afe3011d"). InnerVolumeSpecName "kube-api-access-s9n9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.252784 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b032092f-123d-4532-8193-05c7afe3011d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b032092f-123d-4532-8193-05c7afe3011d" (UID: "b032092f-123d-4532-8193-05c7afe3011d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.349071 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9n9t\" (UniqueName: \"kubernetes.io/projected/b032092f-123d-4532-8193-05c7afe3011d-kube-api-access-s9n9t\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.349160 4871 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b032092f-123d-4532-8193-05c7afe3011d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.840455 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" event={"ID":"b032092f-123d-4532-8193-05c7afe3011d","Type":"ContainerDied","Data":"f7e6e2b058897db0d0065c0dbb4abbc300debe922d4ed6aea9a7893db6379074"} Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.840933 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7e6e2b058897db0d0065c0dbb4abbc300debe922d4ed6aea9a7893db6379074" Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.841192 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc" Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.912488 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5"] Nov 26 06:00:04 crc kubenswrapper[4871]: I1126 06:00:04.927338 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402235-s44z5"] Nov 26 06:00:06 crc kubenswrapper[4871]: I1126 06:00:06.521723 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb3f5110-df96-4946-b0a5-3439ab4e1724" path="/var/lib/kubelet/pods/cb3f5110-df96-4946-b0a5-3439ab4e1724/volumes" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.081837 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tcvc5"] Nov 26 06:00:13 crc kubenswrapper[4871]: E1126 06:00:13.084086 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b032092f-123d-4532-8193-05c7afe3011d" containerName="collect-profiles" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.084174 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b032092f-123d-4532-8193-05c7afe3011d" containerName="collect-profiles" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.084455 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="b032092f-123d-4532-8193-05c7afe3011d" containerName="collect-profiles" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.087185 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.107837 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tcvc5"] Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.140964 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-utilities\") pod \"redhat-marketplace-tcvc5\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") " pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.141022 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-catalog-content\") pod \"redhat-marketplace-tcvc5\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") " pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.141124 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fg77d\" (UniqueName: \"kubernetes.io/projected/b9861fac-72fd-4865-b50f-782a206d4fe1-kube-api-access-fg77d\") pod \"redhat-marketplace-tcvc5\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") " pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.242286 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-utilities\") pod \"redhat-marketplace-tcvc5\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") " pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.242327 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-catalog-content\") pod \"redhat-marketplace-tcvc5\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") " pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.242416 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fg77d\" (UniqueName: \"kubernetes.io/projected/b9861fac-72fd-4865-b50f-782a206d4fe1-kube-api-access-fg77d\") pod \"redhat-marketplace-tcvc5\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") " pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.242933 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-utilities\") pod \"redhat-marketplace-tcvc5\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") " pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.242943 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-catalog-content\") pod \"redhat-marketplace-tcvc5\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") " pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.263492 4871 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-fg77d\" (UniqueName: \"kubernetes.io/projected/b9861fac-72fd-4865-b50f-782a206d4fe1-kube-api-access-fg77d\") pod \"redhat-marketplace-tcvc5\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") " pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.415199 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.909455 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tcvc5"] Nov 26 06:00:13 crc kubenswrapper[4871]: W1126 06:00:13.917673 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9861fac_72fd_4865_b50f_782a206d4fe1.slice/crio-7f1d0e1d69f5d8556ea3a7aa57770f51f67309fb87eb1f0891990a5d3ef5c558 WatchSource:0}: Error finding container 7f1d0e1d69f5d8556ea3a7aa57770f51f67309fb87eb1f0891990a5d3ef5c558: Status 404 returned error can't find the container with id 7f1d0e1d69f5d8556ea3a7aa57770f51f67309fb87eb1f0891990a5d3ef5c558 Nov 26 06:00:13 crc kubenswrapper[4871]: I1126 06:00:13.993662 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcvc5" event={"ID":"b9861fac-72fd-4865-b50f-782a206d4fe1","Type":"ContainerStarted","Data":"7f1d0e1d69f5d8556ea3a7aa57770f51f67309fb87eb1f0891990a5d3ef5c558"} Nov 26 06:00:15 crc kubenswrapper[4871]: I1126 06:00:15.005195 4871 generic.go:334] "Generic (PLEG): container finished" podID="b9861fac-72fd-4865-b50f-782a206d4fe1" containerID="b1122f480d5cedead67221900dc179c0409a02fb22cdf949147338bdb0c8c6ae" exitCode=0 Nov 26 06:00:15 crc kubenswrapper[4871]: I1126 06:00:15.005268 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcvc5" event={"ID":"b9861fac-72fd-4865-b50f-782a206d4fe1","Type":"ContainerDied","Data":"b1122f480d5cedead67221900dc179c0409a02fb22cdf949147338bdb0c8c6ae"} Nov 26 06:00:16 crc kubenswrapper[4871]: I1126 06:00:16.014789 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcvc5" event={"ID":"b9861fac-72fd-4865-b50f-782a206d4fe1","Type":"ContainerStarted","Data":"c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f"} Nov 26 06:00:16 crc kubenswrapper[4871]: I1126 06:00:16.871251 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5gn8j"] Nov 26 06:00:16 crc kubenswrapper[4871]: I1126 06:00:16.874407 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:16 crc kubenswrapper[4871]: I1126 06:00:16.884859 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5gn8j"] Nov 26 06:00:16 crc kubenswrapper[4871]: I1126 06:00:16.919260 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kq5kp\" (UniqueName: \"kubernetes.io/projected/812937ba-99fa-409f-9e16-d1fa71463236-kube-api-access-kq5kp\") pod \"redhat-operators-5gn8j\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:16 crc kubenswrapper[4871]: I1126 06:00:16.919310 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-catalog-content\") pod \"redhat-operators-5gn8j\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:16 crc kubenswrapper[4871]: I1126 06:00:16.919446 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-utilities\") pod \"redhat-operators-5gn8j\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:17 crc kubenswrapper[4871]: I1126 06:00:17.021922 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kq5kp\" (UniqueName: \"kubernetes.io/projected/812937ba-99fa-409f-9e16-d1fa71463236-kube-api-access-kq5kp\") pod \"redhat-operators-5gn8j\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:17 crc kubenswrapper[4871]: I1126 06:00:17.021988 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-catalog-content\") pod \"redhat-operators-5gn8j\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:17 crc kubenswrapper[4871]: I1126 06:00:17.022145 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-utilities\") pod \"redhat-operators-5gn8j\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:17 crc kubenswrapper[4871]: I1126 06:00:17.022731 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-catalog-content\") pod \"redhat-operators-5gn8j\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:17 crc kubenswrapper[4871]: I1126 06:00:17.022742 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-utilities\") pod \"redhat-operators-5gn8j\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:17 crc kubenswrapper[4871]: I1126 06:00:17.025816 4871 generic.go:334] "Generic (PLEG): container finished" 
podID="b9861fac-72fd-4865-b50f-782a206d4fe1" containerID="c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f" exitCode=0 Nov 26 06:00:17 crc kubenswrapper[4871]: I1126 06:00:17.025860 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcvc5" event={"ID":"b9861fac-72fd-4865-b50f-782a206d4fe1","Type":"ContainerDied","Data":"c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f"} Nov 26 06:00:17 crc kubenswrapper[4871]: I1126 06:00:17.050576 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kq5kp\" (UniqueName: \"kubernetes.io/projected/812937ba-99fa-409f-9e16-d1fa71463236-kube-api-access-kq5kp\") pod \"redhat-operators-5gn8j\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:17 crc kubenswrapper[4871]: I1126 06:00:17.207979 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:17 crc kubenswrapper[4871]: I1126 06:00:17.759995 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5gn8j"] Nov 26 06:00:17 crc kubenswrapper[4871]: W1126 06:00:17.760718 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod812937ba_99fa_409f_9e16_d1fa71463236.slice/crio-bcac81a4f44ed69575a2f5cb04cbe2dd86a10973d15edb9251e598728b223c77 WatchSource:0}: Error finding container bcac81a4f44ed69575a2f5cb04cbe2dd86a10973d15edb9251e598728b223c77: Status 404 returned error can't find the container with id bcac81a4f44ed69575a2f5cb04cbe2dd86a10973d15edb9251e598728b223c77 Nov 26 06:00:18 crc kubenswrapper[4871]: I1126 06:00:18.038058 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcvc5" event={"ID":"b9861fac-72fd-4865-b50f-782a206d4fe1","Type":"ContainerStarted","Data":"49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100"} Nov 26 06:00:18 crc kubenswrapper[4871]: I1126 06:00:18.039903 4871 generic.go:334] "Generic (PLEG): container finished" podID="812937ba-99fa-409f-9e16-d1fa71463236" containerID="b0ca1ccd3371f8291872e2fd90bc0958da150b1dcc9cd0ed176b1bc9f9e1d944" exitCode=0 Nov 26 06:00:18 crc kubenswrapper[4871]: I1126 06:00:18.039928 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gn8j" event={"ID":"812937ba-99fa-409f-9e16-d1fa71463236","Type":"ContainerDied","Data":"b0ca1ccd3371f8291872e2fd90bc0958da150b1dcc9cd0ed176b1bc9f9e1d944"} Nov 26 06:00:18 crc kubenswrapper[4871]: I1126 06:00:18.039947 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gn8j" event={"ID":"812937ba-99fa-409f-9e16-d1fa71463236","Type":"ContainerStarted","Data":"bcac81a4f44ed69575a2f5cb04cbe2dd86a10973d15edb9251e598728b223c77"} Nov 26 06:00:18 crc kubenswrapper[4871]: I1126 06:00:18.068615 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tcvc5" podStartSLOduration=2.614158005 podStartE2EDuration="5.068597942s" podCreationTimestamp="2025-11-26 06:00:13 +0000 UTC" firstStartedPulling="2025-11-26 06:00:15.008337011 +0000 UTC m=+2073.191388587" lastFinishedPulling="2025-11-26 06:00:17.462776938 +0000 UTC m=+2075.645828524" observedRunningTime="2025-11-26 06:00:18.061038314 +0000 UTC m=+2076.244089900" watchObservedRunningTime="2025-11-26 
06:00:18.068597942 +0000 UTC m=+2076.251649528" Nov 26 06:00:19 crc kubenswrapper[4871]: I1126 06:00:19.054288 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gn8j" event={"ID":"812937ba-99fa-409f-9e16-d1fa71463236","Type":"ContainerStarted","Data":"bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d"} Nov 26 06:00:22 crc kubenswrapper[4871]: I1126 06:00:22.698018 4871 scope.go:117] "RemoveContainer" containerID="2b65b3ae87375dbf86af9555f34774ddda3de3391ea7d761ffeecaebf56ed651" Nov 26 06:00:23 crc kubenswrapper[4871]: I1126 06:00:23.095983 4871 generic.go:334] "Generic (PLEG): container finished" podID="812937ba-99fa-409f-9e16-d1fa71463236" containerID="bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d" exitCode=0 Nov 26 06:00:23 crc kubenswrapper[4871]: I1126 06:00:23.096050 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gn8j" event={"ID":"812937ba-99fa-409f-9e16-d1fa71463236","Type":"ContainerDied","Data":"bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d"} Nov 26 06:00:23 crc kubenswrapper[4871]: I1126 06:00:23.415705 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:23 crc kubenswrapper[4871]: I1126 06:00:23.415809 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:23 crc kubenswrapper[4871]: I1126 06:00:23.501958 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:24 crc kubenswrapper[4871]: I1126 06:00:24.110878 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gn8j" event={"ID":"812937ba-99fa-409f-9e16-d1fa71463236","Type":"ContainerStarted","Data":"1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7"} Nov 26 06:00:24 crc kubenswrapper[4871]: I1126 06:00:24.138400 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5gn8j" podStartSLOduration=2.626017302 podStartE2EDuration="8.138378892s" podCreationTimestamp="2025-11-26 06:00:16 +0000 UTC" firstStartedPulling="2025-11-26 06:00:18.041413126 +0000 UTC m=+2076.224464712" lastFinishedPulling="2025-11-26 06:00:23.553774716 +0000 UTC m=+2081.736826302" observedRunningTime="2025-11-26 06:00:24.131257325 +0000 UTC m=+2082.314308951" watchObservedRunningTime="2025-11-26 06:00:24.138378892 +0000 UTC m=+2082.321430478" Nov 26 06:00:24 crc kubenswrapper[4871]: I1126 06:00:24.176555 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tcvc5" Nov 26 06:00:25 crc kubenswrapper[4871]: I1126 06:00:25.057637 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tcvc5"] Nov 26 06:00:26 crc kubenswrapper[4871]: I1126 06:00:26.128427 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tcvc5" podUID="b9861fac-72fd-4865-b50f-782a206d4fe1" containerName="registry-server" containerID="cri-o://49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100" gracePeriod=2 Nov 26 06:00:26 crc kubenswrapper[4871]: I1126 06:00:26.607110 4871 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 06:00:26 crc kubenswrapper[4871]: I1126 06:00:26.739051 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-catalog-content\") pod \"b9861fac-72fd-4865-b50f-782a206d4fe1\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") "
Nov 26 06:00:26 crc kubenswrapper[4871]: I1126 06:00:26.739504 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-utilities\") pod \"b9861fac-72fd-4865-b50f-782a206d4fe1\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") "
Nov 26 06:00:26 crc kubenswrapper[4871]: I1126 06:00:26.739644 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fg77d\" (UniqueName: \"kubernetes.io/projected/b9861fac-72fd-4865-b50f-782a206d4fe1-kube-api-access-fg77d\") pod \"b9861fac-72fd-4865-b50f-782a206d4fe1\" (UID: \"b9861fac-72fd-4865-b50f-782a206d4fe1\") "
Nov 26 06:00:26 crc kubenswrapper[4871]: I1126 06:00:26.740309 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-utilities" (OuterVolumeSpecName: "utilities") pod "b9861fac-72fd-4865-b50f-782a206d4fe1" (UID: "b9861fac-72fd-4865-b50f-782a206d4fe1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:00:26 crc kubenswrapper[4871]: I1126 06:00:26.741443 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 06:00:26 crc kubenswrapper[4871]: I1126 06:00:26.755217 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b9861fac-72fd-4865-b50f-782a206d4fe1" (UID: "b9861fac-72fd-4865-b50f-782a206d4fe1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:00:26 crc kubenswrapper[4871]: I1126 06:00:26.762386 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9861fac-72fd-4865-b50f-782a206d4fe1-kube-api-access-fg77d" (OuterVolumeSpecName: "kube-api-access-fg77d") pod "b9861fac-72fd-4865-b50f-782a206d4fe1" (UID: "b9861fac-72fd-4865-b50f-782a206d4fe1"). InnerVolumeSpecName "kube-api-access-fg77d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:00:26 crc kubenswrapper[4871]: I1126 06:00:26.842784 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9861fac-72fd-4865-b50f-782a206d4fe1-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 06:00:26 crc kubenswrapper[4871]: I1126 06:00:26.843085 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fg77d\" (UniqueName: \"kubernetes.io/projected/b9861fac-72fd-4865-b50f-782a206d4fe1-kube-api-access-fg77d\") on node \"crc\" DevicePath \"\""
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.153203 4871 generic.go:334] "Generic (PLEG): container finished" podID="b9861fac-72fd-4865-b50f-782a206d4fe1" containerID="49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100" exitCode=0
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.153267 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcvc5" event={"ID":"b9861fac-72fd-4865-b50f-782a206d4fe1","Type":"ContainerDied","Data":"49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100"}
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.153306 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tcvc5" event={"ID":"b9861fac-72fd-4865-b50f-782a206d4fe1","Type":"ContainerDied","Data":"7f1d0e1d69f5d8556ea3a7aa57770f51f67309fb87eb1f0891990a5d3ef5c558"}
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.153335 4871 scope.go:117] "RemoveContainer" containerID="49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100"
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.155196 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tcvc5"
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.177486 4871 scope.go:117] "RemoveContainer" containerID="c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f"
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.194960 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tcvc5"]
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.204165 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tcvc5"]
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.208908 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5gn8j"
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.209205 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5gn8j"
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.225543 4871 scope.go:117] "RemoveContainer" containerID="b1122f480d5cedead67221900dc179c0409a02fb22cdf949147338bdb0c8c6ae"
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.272945 4871 scope.go:117] "RemoveContainer" containerID="49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100"
Nov 26 06:00:27 crc kubenswrapper[4871]: E1126 06:00:27.274263 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100\": container with ID starting with 49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100 not found: ID does not exist" containerID="49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100"
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.274310 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100"} err="failed to get container status \"49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100\": rpc error: code = NotFound desc = could not find container \"49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100\": container with ID starting with 49e559fce08e784a71364264c80d1976dcaf89fb11f06886d4b4327d3832d100 not found: ID does not exist"
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.274331 4871 scope.go:117] "RemoveContainer" containerID="c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f"
Nov 26 06:00:27 crc kubenswrapper[4871]: E1126 06:00:27.274647 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f\": container with ID starting with c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f not found: ID does not exist" containerID="c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f"
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.274692 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f"} err="failed to get container status \"c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f\": rpc error: code = NotFound desc = could not find container \"c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f\": container with ID starting with c7df3313555925f4dc4e3d6661e06b8aaa33933994a52d1756eb46c4aea1c97f not found: ID does not exist"
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.274724 4871 scope.go:117] "RemoveContainer" containerID="b1122f480d5cedead67221900dc179c0409a02fb22cdf949147338bdb0c8c6ae"
Nov 26 06:00:27 crc kubenswrapper[4871]: E1126 06:00:27.275056 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1122f480d5cedead67221900dc179c0409a02fb22cdf949147338bdb0c8c6ae\": container with ID starting with b1122f480d5cedead67221900dc179c0409a02fb22cdf949147338bdb0c8c6ae not found: ID does not exist" containerID="b1122f480d5cedead67221900dc179c0409a02fb22cdf949147338bdb0c8c6ae"
Nov 26 06:00:27 crc kubenswrapper[4871]: I1126 06:00:27.275089 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1122f480d5cedead67221900dc179c0409a02fb22cdf949147338bdb0c8c6ae"} err="failed to get container status \"b1122f480d5cedead67221900dc179c0409a02fb22cdf949147338bdb0c8c6ae\": rpc error: code = NotFound desc = could not find container \"b1122f480d5cedead67221900dc179c0409a02fb22cdf949147338bdb0c8c6ae\": container with ID starting with b1122f480d5cedead67221900dc179c0409a02fb22cdf949147338bdb0c8c6ae not found: ID does not exist"
Nov 26 06:00:28 crc kubenswrapper[4871]: I1126 06:00:28.267922 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5gn8j" podUID="812937ba-99fa-409f-9e16-d1fa71463236" containerName="registry-server" probeResult="failure" output=<
Nov 26 06:00:28 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s
Nov 26 06:00:28 crc kubenswrapper[4871]: >
Nov 26 06:00:28 crc kubenswrapper[4871]: I1126 06:00:28.522003 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9861fac-72fd-4865-b50f-782a206d4fe1" path="/var/lib/kubelet/pods/b9861fac-72fd-4865-b50f-782a206d4fe1/volumes"
Nov 26 06:00:33 crc kubenswrapper[4871]: I1126 06:00:33.995682 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tshsl"]
Nov 26 06:00:33 crc kubenswrapper[4871]: E1126 06:00:33.996922 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9861fac-72fd-4865-b50f-782a206d4fe1" containerName="extract-content"
Nov 26 06:00:33 crc kubenswrapper[4871]: I1126 06:00:33.996948 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9861fac-72fd-4865-b50f-782a206d4fe1" containerName="extract-content"
Nov 26 06:00:33 crc kubenswrapper[4871]: E1126 06:00:33.996965 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9861fac-72fd-4865-b50f-782a206d4fe1" containerName="registry-server"
Nov 26 06:00:33 crc kubenswrapper[4871]: I1126 06:00:33.996977 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9861fac-72fd-4865-b50f-782a206d4fe1" containerName="registry-server"
Nov 26 06:00:33 crc kubenswrapper[4871]: E1126 06:00:33.997029 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9861fac-72fd-4865-b50f-782a206d4fe1" containerName="extract-utilities"
Nov 26 06:00:33 crc kubenswrapper[4871]: I1126 06:00:33.997044 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9861fac-72fd-4865-b50f-782a206d4fe1" containerName="extract-utilities"
Nov 26 06:00:33 crc kubenswrapper[4871]: I1126 06:00:33.997426 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9861fac-72fd-4865-b50f-782a206d4fe1" containerName="registry-server"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.003674 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.056047 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tshsl"]
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.127081 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-utilities\") pod \"certified-operators-tshsl\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") " pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.127562 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6nr5\" (UniqueName: \"kubernetes.io/projected/610c8417-f10d-408d-ad5e-59e2b5c310c6-kube-api-access-w6nr5\") pod \"certified-operators-tshsl\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") " pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.128022 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-catalog-content\") pod \"certified-operators-tshsl\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") " pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.230275 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6nr5\" (UniqueName: \"kubernetes.io/projected/610c8417-f10d-408d-ad5e-59e2b5c310c6-kube-api-access-w6nr5\") pod \"certified-operators-tshsl\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") " pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.230404 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-catalog-content\") pod \"certified-operators-tshsl\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") " pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.230480 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-utilities\") pod \"certified-operators-tshsl\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") " pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.231219 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-utilities\") pod \"certified-operators-tshsl\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") " pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.231250 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-catalog-content\") pod \"certified-operators-tshsl\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") " pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.254212 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6nr5\" (UniqueName: \"kubernetes.io/projected/610c8417-f10d-408d-ad5e-59e2b5c310c6-kube-api-access-w6nr5\") pod \"certified-operators-tshsl\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") " pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.351569 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:34 crc kubenswrapper[4871]: I1126 06:00:34.728694 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tshsl"]
Nov 26 06:00:35 crc kubenswrapper[4871]: I1126 06:00:35.239583 4871 generic.go:334] "Generic (PLEG): container finished" podID="610c8417-f10d-408d-ad5e-59e2b5c310c6" containerID="4c0fb49e8847248b52b886e7f319df35dc52f9d5890c9f8fa0ba264956df85ae" exitCode=0
Nov 26 06:00:35 crc kubenswrapper[4871]: I1126 06:00:35.239651 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tshsl" event={"ID":"610c8417-f10d-408d-ad5e-59e2b5c310c6","Type":"ContainerDied","Data":"4c0fb49e8847248b52b886e7f319df35dc52f9d5890c9f8fa0ba264956df85ae"}
Nov 26 06:00:35 crc kubenswrapper[4871]: I1126 06:00:35.239913 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tshsl" event={"ID":"610c8417-f10d-408d-ad5e-59e2b5c310c6","Type":"ContainerStarted","Data":"b739c1f35dd867d1db5188d2f82188fc33c58c1299776546b64b3ded3502f698"}
Nov 26 06:00:37 crc kubenswrapper[4871]: I1126 06:00:37.265593 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tshsl" event={"ID":"610c8417-f10d-408d-ad5e-59e2b5c310c6","Type":"ContainerStarted","Data":"bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6"}
Nov 26 06:00:37 crc kubenswrapper[4871]: I1126 06:00:37.273082 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5gn8j"
Nov 26 06:00:37 crc kubenswrapper[4871]: I1126 06:00:37.353545 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5gn8j"
Nov 26 06:00:37 crc kubenswrapper[4871]: E1126 06:00:37.978691 4871 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod610c8417_f10d_408d_ad5e_59e2b5c310c6.slice/crio-bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod610c8417_f10d_408d_ad5e_59e2b5c310c6.slice/crio-conmon-bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6.scope\": RecentStats: unable to find data in memory cache]"
Nov 26 06:00:38 crc kubenswrapper[4871]: I1126 06:00:38.279979 4871 generic.go:334] "Generic (PLEG): container finished" podID="610c8417-f10d-408d-ad5e-59e2b5c310c6" containerID="bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6" exitCode=0
Nov 26 06:00:38 crc kubenswrapper[4871]: I1126 06:00:38.280101 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tshsl" event={"ID":"610c8417-f10d-408d-ad5e-59e2b5c310c6","Type":"ContainerDied","Data":"bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6"}
event={"ID":"610c8417-f10d-408d-ad5e-59e2b5c310c6","Type":"ContainerDied","Data":"bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6"} Nov 26 06:00:39 crc kubenswrapper[4871]: I1126 06:00:39.298464 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tshsl" event={"ID":"610c8417-f10d-408d-ad5e-59e2b5c310c6","Type":"ContainerStarted","Data":"bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e"} Nov 26 06:00:39 crc kubenswrapper[4871]: I1126 06:00:39.319937 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tshsl" podStartSLOduration=2.727178529 podStartE2EDuration="6.319916901s" podCreationTimestamp="2025-11-26 06:00:33 +0000 UTC" firstStartedPulling="2025-11-26 06:00:35.242163377 +0000 UTC m=+2093.425214973" lastFinishedPulling="2025-11-26 06:00:38.834901759 +0000 UTC m=+2097.017953345" observedRunningTime="2025-11-26 06:00:39.313308627 +0000 UTC m=+2097.496360213" watchObservedRunningTime="2025-11-26 06:00:39.319916901 +0000 UTC m=+2097.502968487" Nov 26 06:00:39 crc kubenswrapper[4871]: I1126 06:00:39.568751 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5gn8j"] Nov 26 06:00:39 crc kubenswrapper[4871]: I1126 06:00:39.568971 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5gn8j" podUID="812937ba-99fa-409f-9e16-d1fa71463236" containerName="registry-server" containerID="cri-o://1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7" gracePeriod=2 Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.062155 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.166716 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kq5kp\" (UniqueName: \"kubernetes.io/projected/812937ba-99fa-409f-9e16-d1fa71463236-kube-api-access-kq5kp\") pod \"812937ba-99fa-409f-9e16-d1fa71463236\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.166802 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-catalog-content\") pod \"812937ba-99fa-409f-9e16-d1fa71463236\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.167113 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-utilities\") pod \"812937ba-99fa-409f-9e16-d1fa71463236\" (UID: \"812937ba-99fa-409f-9e16-d1fa71463236\") " Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.167761 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-utilities" (OuterVolumeSpecName: "utilities") pod "812937ba-99fa-409f-9e16-d1fa71463236" (UID: "812937ba-99fa-409f-9e16-d1fa71463236"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.175882 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/812937ba-99fa-409f-9e16-d1fa71463236-kube-api-access-kq5kp" (OuterVolumeSpecName: "kube-api-access-kq5kp") pod "812937ba-99fa-409f-9e16-d1fa71463236" (UID: "812937ba-99fa-409f-9e16-d1fa71463236"). InnerVolumeSpecName "kube-api-access-kq5kp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.271089 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kq5kp\" (UniqueName: \"kubernetes.io/projected/812937ba-99fa-409f-9e16-d1fa71463236-kube-api-access-kq5kp\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.271150 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.298783 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "812937ba-99fa-409f-9e16-d1fa71463236" (UID: "812937ba-99fa-409f-9e16-d1fa71463236"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.313755 4871 generic.go:334] "Generic (PLEG): container finished" podID="812937ba-99fa-409f-9e16-d1fa71463236" containerID="1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7" exitCode=0 Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.314652 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5gn8j" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.314937 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gn8j" event={"ID":"812937ba-99fa-409f-9e16-d1fa71463236","Type":"ContainerDied","Data":"1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7"} Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.314968 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gn8j" event={"ID":"812937ba-99fa-409f-9e16-d1fa71463236","Type":"ContainerDied","Data":"bcac81a4f44ed69575a2f5cb04cbe2dd86a10973d15edb9251e598728b223c77"} Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.314989 4871 scope.go:117] "RemoveContainer" containerID="1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.353872 4871 scope.go:117] "RemoveContainer" containerID="bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.356764 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5gn8j"] Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.367517 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5gn8j"] Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.375341 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/812937ba-99fa-409f-9e16-d1fa71463236-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.380174 4871 scope.go:117] "RemoveContainer" containerID="b0ca1ccd3371f8291872e2fd90bc0958da150b1dcc9cd0ed176b1bc9f9e1d944" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.443826 4871 scope.go:117] "RemoveContainer" containerID="1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7" Nov 26 06:00:40 crc kubenswrapper[4871]: E1126 06:00:40.444250 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7\": container with ID starting with 1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7 not found: ID does not exist" containerID="1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.444319 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7"} err="failed to get container status \"1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7\": rpc error: code = NotFound desc = could not find container \"1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7\": container with ID starting with 1e2e35069527c4fb3b218774a7be7e05dc2c4d91f6dba2fd70b40d1441f106e7 not found: ID does not exist" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.444362 4871 scope.go:117] "RemoveContainer" containerID="bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d" Nov 26 06:00:40 crc kubenswrapper[4871]: E1126 06:00:40.444857 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d\": container with ID 
starting with bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d not found: ID does not exist" containerID="bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.444902 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d"} err="failed to get container status \"bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d\": rpc error: code = NotFound desc = could not find container \"bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d\": container with ID starting with bb1fe90e22e4e0dbf32e9bc271b0ceac036f8f69ae1e740dbcaa3d6a5c1a5c1d not found: ID does not exist" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.444931 4871 scope.go:117] "RemoveContainer" containerID="b0ca1ccd3371f8291872e2fd90bc0958da150b1dcc9cd0ed176b1bc9f9e1d944" Nov 26 06:00:40 crc kubenswrapper[4871]: E1126 06:00:40.445206 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0ca1ccd3371f8291872e2fd90bc0958da150b1dcc9cd0ed176b1bc9f9e1d944\": container with ID starting with b0ca1ccd3371f8291872e2fd90bc0958da150b1dcc9cd0ed176b1bc9f9e1d944 not found: ID does not exist" containerID="b0ca1ccd3371f8291872e2fd90bc0958da150b1dcc9cd0ed176b1bc9f9e1d944" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.445249 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0ca1ccd3371f8291872e2fd90bc0958da150b1dcc9cd0ed176b1bc9f9e1d944"} err="failed to get container status \"b0ca1ccd3371f8291872e2fd90bc0958da150b1dcc9cd0ed176b1bc9f9e1d944\": rpc error: code = NotFound desc = could not find container \"b0ca1ccd3371f8291872e2fd90bc0958da150b1dcc9cd0ed176b1bc9f9e1d944\": container with ID starting with b0ca1ccd3371f8291872e2fd90bc0958da150b1dcc9cd0ed176b1bc9f9e1d944 not found: ID does not exist" Nov 26 06:00:40 crc kubenswrapper[4871]: I1126 06:00:40.521940 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="812937ba-99fa-409f-9e16-d1fa71463236" path="/var/lib/kubelet/pods/812937ba-99fa-409f-9e16-d1fa71463236/volumes" Nov 26 06:00:42 crc kubenswrapper[4871]: I1126 06:00:42.336292 4871 generic.go:334] "Generic (PLEG): container finished" podID="48acdf72-822b-456b-b545-bd1499db855d" containerID="46cb2c0cf488fd2476f4db6d09471fcbe9a163c43a48dc08ad552a22abf68422" exitCode=0 Nov 26 06:00:42 crc kubenswrapper[4871]: I1126 06:00:42.336384 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" event={"ID":"48acdf72-822b-456b-b545-bd1499db855d","Type":"ContainerDied","Data":"46cb2c0cf488fd2476f4db6d09471fcbe9a163c43a48dc08ad552a22abf68422"} Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.842176 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.970484 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-telemetry-combined-ca-bundle\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.970606 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.971864 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.971922 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-bootstrap-combined-ca-bundle\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.971981 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-inventory\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.972030 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-libvirt-combined-ca-bundle\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.972109 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.972179 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvtdw\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-kube-api-access-fvtdw\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.972232 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" 
(UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.972331 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ssh-key\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.972416 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-nova-combined-ca-bundle\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.972464 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-neutron-metadata-combined-ca-bundle\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.972673 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-repo-setup-combined-ca-bundle\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.972724 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ovn-combined-ca-bundle\") pod \"48acdf72-822b-456b-b545-bd1499db855d\" (UID: \"48acdf72-822b-456b-b545-bd1499db855d\") " Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.978442 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-kube-api-access-fvtdw" (OuterVolumeSpecName: "kube-api-access-fvtdw") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "kube-api-access-fvtdw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.978904 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.979343 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.979769 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.979797 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.979995 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.982905 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.982983 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.983731 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.983853 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.985983 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:00:43 crc kubenswrapper[4871]: I1126 06:00:43.986652 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.007135 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-inventory" (OuterVolumeSpecName: "inventory") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.007412 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "48acdf72-822b-456b-b545-bd1499db855d" (UID: "48acdf72-822b-456b-b545-bd1499db855d"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075004 4871 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075046 4871 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075064 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvtdw\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-kube-api-access-fvtdw\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075078 4871 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075094 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075106 4871 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075118 4871 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075130 4871 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075144 4871 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075157 4871 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075168 4871 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075181 4871 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/48acdf72-822b-456b-b545-bd1499db855d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075193 4871 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.075208 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/48acdf72-822b-456b-b545-bd1499db855d-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.352030 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tshsl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.352340 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tshsl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.376649 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" event={"ID":"48acdf72-822b-456b-b545-bd1499db855d","Type":"ContainerDied","Data":"cd30a429a670d2caddace583bf7a93aaa2a299dc3b45c9fc01de247b44e0fead"} Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.376695 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd30a429a670d2caddace583bf7a93aaa2a299dc3b45c9fc01de247b44e0fead" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.376738 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.423260 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tshsl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.532697 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl"] Nov 26 06:00:44 crc kubenswrapper[4871]: E1126 06:00:44.533317 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="812937ba-99fa-409f-9e16-d1fa71463236" containerName="registry-server" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.533333 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="812937ba-99fa-409f-9e16-d1fa71463236" containerName="registry-server" Nov 26 06:00:44 crc kubenswrapper[4871]: E1126 06:00:44.533345 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="812937ba-99fa-409f-9e16-d1fa71463236" containerName="extract-content" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.533352 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="812937ba-99fa-409f-9e16-d1fa71463236" containerName="extract-content" Nov 26 06:00:44 crc kubenswrapper[4871]: E1126 06:00:44.533379 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48acdf72-822b-456b-b545-bd1499db855d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.533388 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="48acdf72-822b-456b-b545-bd1499db855d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 26 06:00:44 crc kubenswrapper[4871]: E1126 06:00:44.533412 4871 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="812937ba-99fa-409f-9e16-d1fa71463236" containerName="extract-utilities" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.533418 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="812937ba-99fa-409f-9e16-d1fa71463236" containerName="extract-utilities" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.533626 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="48acdf72-822b-456b-b545-bd1499db855d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.533638 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="812937ba-99fa-409f-9e16-d1fa71463236" containerName="registry-server" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.534269 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.536794 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.536949 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.537854 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.538317 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.538355 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.544166 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl"] Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.685590 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.685678 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/8d747185-1d52-4102-be05-7f18ff179f3a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.685733 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.685767 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.685884 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2776\" (UniqueName: \"kubernetes.io/projected/8d747185-1d52-4102-be05-7f18ff179f3a-kube-api-access-j2776\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.787458 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/8d747185-1d52-4102-be05-7f18ff179f3a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.793309 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.793444 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.793691 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2776\" (UniqueName: \"kubernetes.io/projected/8d747185-1d52-4102-be05-7f18ff179f3a-kube-api-access-j2776\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.793849 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.801584 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/8d747185-1d52-4102-be05-7f18ff179f3a-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.807807 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-inventory\") pod 
\"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.811148 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.821419 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.835128 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2776\" (UniqueName: \"kubernetes.io/projected/8d747185-1d52-4102-be05-7f18ff179f3a-kube-api-access-j2776\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-896gl\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:44 crc kubenswrapper[4871]: I1126 06:00:44.862928 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:00:45 crc kubenswrapper[4871]: I1126 06:00:45.441790 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl"] Nov 26 06:00:45 crc kubenswrapper[4871]: I1126 06:00:45.448985 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tshsl" Nov 26 06:00:46 crc kubenswrapper[4871]: I1126 06:00:46.406464 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" event={"ID":"8d747185-1d52-4102-be05-7f18ff179f3a","Type":"ContainerStarted","Data":"ee6f51c7e5ab819bb5c3f1c91b4cb9c634d5351ee9f8f8bbb9ce07ede5684dad"} Nov 26 06:00:46 crc kubenswrapper[4871]: I1126 06:00:46.407210 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" event={"ID":"8d747185-1d52-4102-be05-7f18ff179f3a","Type":"ContainerStarted","Data":"b2f990eb94e5db51a9643321ca4c1af2764271a388229449f06da3d65b1cc122"} Nov 26 06:00:46 crc kubenswrapper[4871]: I1126 06:00:46.431836 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" podStartSLOduration=1.921125306 podStartE2EDuration="2.431811126s" podCreationTimestamp="2025-11-26 06:00:44 +0000 UTC" firstStartedPulling="2025-11-26 06:00:45.456167603 +0000 UTC m=+2103.639219189" lastFinishedPulling="2025-11-26 06:00:45.966853413 +0000 UTC m=+2104.149905009" observedRunningTime="2025-11-26 06:00:46.424668529 +0000 UTC m=+2104.607720135" watchObservedRunningTime="2025-11-26 06:00:46.431811126 +0000 UTC m=+2104.614862722" Nov 26 06:00:46 crc kubenswrapper[4871]: I1126 06:00:46.590230 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tshsl"] Nov 26 06:00:47 crc kubenswrapper[4871]: I1126 06:00:47.416067 4871 
Nov 26 06:00:47 crc kubenswrapper[4871]: I1126 06:00:47.918745 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.066749 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-catalog-content\") pod \"610c8417-f10d-408d-ad5e-59e2b5c310c6\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") "
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.066884 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6nr5\" (UniqueName: \"kubernetes.io/projected/610c8417-f10d-408d-ad5e-59e2b5c310c6-kube-api-access-w6nr5\") pod \"610c8417-f10d-408d-ad5e-59e2b5c310c6\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") "
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.067082 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-utilities\") pod \"610c8417-f10d-408d-ad5e-59e2b5c310c6\" (UID: \"610c8417-f10d-408d-ad5e-59e2b5c310c6\") "
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.068965 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-utilities" (OuterVolumeSpecName: "utilities") pod "610c8417-f10d-408d-ad5e-59e2b5c310c6" (UID: "610c8417-f10d-408d-ad5e-59e2b5c310c6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.077371 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/610c8417-f10d-408d-ad5e-59e2b5c310c6-kube-api-access-w6nr5" (OuterVolumeSpecName: "kube-api-access-w6nr5") pod "610c8417-f10d-408d-ad5e-59e2b5c310c6" (UID: "610c8417-f10d-408d-ad5e-59e2b5c310c6"). InnerVolumeSpecName "kube-api-access-w6nr5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.124823 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "610c8417-f10d-408d-ad5e-59e2b5c310c6" (UID: "610c8417-f10d-408d-ad5e-59e2b5c310c6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.169337 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.169366 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6nr5\" (UniqueName: \"kubernetes.io/projected/610c8417-f10d-408d-ad5e-59e2b5c310c6-kube-api-access-w6nr5\") on node \"crc\" DevicePath \"\""
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.169378 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/610c8417-f10d-408d-ad5e-59e2b5c310c6-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.445995 4871 generic.go:334] "Generic (PLEG): container finished" podID="610c8417-f10d-408d-ad5e-59e2b5c310c6" containerID="bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e" exitCode=0
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.446074 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tshsl" event={"ID":"610c8417-f10d-408d-ad5e-59e2b5c310c6","Type":"ContainerDied","Data":"bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e"}
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.446130 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tshsl" event={"ID":"610c8417-f10d-408d-ad5e-59e2b5c310c6","Type":"ContainerDied","Data":"b739c1f35dd867d1db5188d2f82188fc33c58c1299776546b64b3ded3502f698"}
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.446154 4871 scope.go:117] "RemoveContainer" containerID="bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e"
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.446475 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tshsl"
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.479832 4871 scope.go:117] "RemoveContainer" containerID="bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6"
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.505031 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tshsl"]
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.517443 4871 scope.go:117] "RemoveContainer" containerID="4c0fb49e8847248b52b886e7f319df35dc52f9d5890c9f8fa0ba264956df85ae"
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.519320 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tshsl"]
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.566283 4871 scope.go:117] "RemoveContainer" containerID="bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e"
Nov 26 06:00:48 crc kubenswrapper[4871]: E1126 06:00:48.566848 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e\": container with ID starting with bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e not found: ID does not exist" containerID="bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e"
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.566884 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e"} err="failed to get container status \"bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e\": rpc error: code = NotFound desc = could not find container \"bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e\": container with ID starting with bf967be19643f8bdff40bd68a55510cf221dd4cc48da42de781d4b69f7165b6e not found: ID does not exist"
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.566905 4871 scope.go:117] "RemoveContainer" containerID="bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6"
Nov 26 06:00:48 crc kubenswrapper[4871]: E1126 06:00:48.567299 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6\": container with ID starting with bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6 not found: ID does not exist" containerID="bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6"
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.567333 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6"} err="failed to get container status \"bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6\": rpc error: code = NotFound desc = could not find container \"bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6\": container with ID starting with bacad671de84b8902133b311aae4771a9e733ec48a656c463cc0fd7dc0ad35e6 not found: ID does not exist"
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.567354 4871 scope.go:117] "RemoveContainer" containerID="4c0fb49e8847248b52b886e7f319df35dc52f9d5890c9f8fa0ba264956df85ae"
Nov 26 06:00:48 crc kubenswrapper[4871]: E1126 06:00:48.567652 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c0fb49e8847248b52b886e7f319df35dc52f9d5890c9f8fa0ba264956df85ae\": container with ID starting with 4c0fb49e8847248b52b886e7f319df35dc52f9d5890c9f8fa0ba264956df85ae not found: ID does not exist" containerID="4c0fb49e8847248b52b886e7f319df35dc52f9d5890c9f8fa0ba264956df85ae"
Nov 26 06:00:48 crc kubenswrapper[4871]: I1126 06:00:48.567674 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c0fb49e8847248b52b886e7f319df35dc52f9d5890c9f8fa0ba264956df85ae"} err="failed to get container status \"4c0fb49e8847248b52b886e7f319df35dc52f9d5890c9f8fa0ba264956df85ae\": rpc error: code = NotFound desc = could not find container \"4c0fb49e8847248b52b886e7f319df35dc52f9d5890c9f8fa0ba264956df85ae\": container with ID starting with 4c0fb49e8847248b52b886e7f319df35dc52f9d5890c9f8fa0ba264956df85ae not found: ID does not exist"
Nov 26 06:00:50 crc kubenswrapper[4871]: I1126 06:00:50.531235 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="610c8417-f10d-408d-ad5e-59e2b5c310c6" path="/var/lib/kubelet/pods/610c8417-f10d-408d-ad5e-59e2b5c310c6/volumes"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.188840 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-72vst"]
Nov 26 06:00:51 crc kubenswrapper[4871]: E1126 06:00:51.189254 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="610c8417-f10d-408d-ad5e-59e2b5c310c6" containerName="registry-server"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.189272 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="610c8417-f10d-408d-ad5e-59e2b5c310c6" containerName="registry-server"
Nov 26 06:00:51 crc kubenswrapper[4871]: E1126 06:00:51.189294 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="610c8417-f10d-408d-ad5e-59e2b5c310c6" containerName="extract-content"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.189300 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="610c8417-f10d-408d-ad5e-59e2b5c310c6" containerName="extract-content"
Nov 26 06:00:51 crc kubenswrapper[4871]: E1126 06:00:51.189316 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="610c8417-f10d-408d-ad5e-59e2b5c310c6" containerName="extract-utilities"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.189322 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="610c8417-f10d-408d-ad5e-59e2b5c310c6" containerName="extract-utilities"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.189590 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="610c8417-f10d-408d-ad5e-59e2b5c310c6" containerName="registry-server"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.190957 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.215156 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-72vst"]
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.336480 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-catalog-content\") pod \"community-operators-72vst\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.336669 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-utilities\") pod \"community-operators-72vst\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.336718 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxlgq\" (UniqueName: \"kubernetes.io/projected/111dc800-8903-483c-8420-322a90bf27e1-kube-api-access-nxlgq\") pod \"community-operators-72vst\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.439063 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-catalog-content\") pod \"community-operators-72vst\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.439152 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-utilities\") pod \"community-operators-72vst\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.439201 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxlgq\" (UniqueName: \"kubernetes.io/projected/111dc800-8903-483c-8420-322a90bf27e1-kube-api-access-nxlgq\") pod \"community-operators-72vst\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.440127 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-catalog-content\") pod \"community-operators-72vst\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.440176 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-utilities\") pod \"community-operators-72vst\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.463635 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxlgq\" (UniqueName: \"kubernetes.io/projected/111dc800-8903-483c-8420-322a90bf27e1-kube-api-access-nxlgq\") pod \"community-operators-72vst\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:00:51 crc kubenswrapper[4871]: I1126 06:00:51.534292 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:00:52 crc kubenswrapper[4871]: I1126 06:00:52.140576 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-72vst"]
Nov 26 06:00:52 crc kubenswrapper[4871]: I1126 06:00:52.497506 4871 generic.go:334] "Generic (PLEG): container finished" podID="111dc800-8903-483c-8420-322a90bf27e1" containerID="268ea80b44049943c7e59be50191e2d0cf6a1847b715f160b81ca6da82b9de94" exitCode=0
Nov 26 06:00:52 crc kubenswrapper[4871]: I1126 06:00:52.497599 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-72vst" event={"ID":"111dc800-8903-483c-8420-322a90bf27e1","Type":"ContainerDied","Data":"268ea80b44049943c7e59be50191e2d0cf6a1847b715f160b81ca6da82b9de94"}
Nov 26 06:00:52 crc kubenswrapper[4871]: I1126 06:00:52.497635 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-72vst" event={"ID":"111dc800-8903-483c-8420-322a90bf27e1","Type":"ContainerStarted","Data":"5bb8f959dfbcb38f0ddb35ab93790d741a2737292d86e0e91ce78f2843da9349"}
Nov 26 06:00:53 crc kubenswrapper[4871]: I1126 06:00:53.508212 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-72vst" event={"ID":"111dc800-8903-483c-8420-322a90bf27e1","Type":"ContainerStarted","Data":"ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323"}
Nov 26 06:00:54 crc kubenswrapper[4871]: I1126 06:00:54.520857 4871 generic.go:334] "Generic (PLEG): container finished" podID="111dc800-8903-483c-8420-322a90bf27e1" containerID="ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323" exitCode=0
Nov 26 06:00:54 crc kubenswrapper[4871]: I1126 06:00:54.533918 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-72vst" event={"ID":"111dc800-8903-483c-8420-322a90bf27e1","Type":"ContainerDied","Data":"ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323"}
Nov 26 06:00:55 crc kubenswrapper[4871]: I1126 06:00:55.535700 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-72vst" event={"ID":"111dc800-8903-483c-8420-322a90bf27e1","Type":"ContainerStarted","Data":"be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172"}
Nov 26 06:00:55 crc kubenswrapper[4871]: I1126 06:00:55.575672 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-72vst" podStartSLOduration=2.143713633 podStartE2EDuration="4.575654902s" podCreationTimestamp="2025-11-26 06:00:51 +0000 UTC" firstStartedPulling="2025-11-26 06:00:52.500112921 +0000 UTC m=+2110.683164507" lastFinishedPulling="2025-11-26 06:00:54.93205418 +0000 UTC m=+2113.115105776" observedRunningTime="2025-11-26 06:00:55.558918766 +0000 UTC m=+2113.741970352" watchObservedRunningTime="2025-11-26 06:00:55.575654902 +0000 UTC m=+2113.758706488"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.133343 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29402281-4rm7g"]
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.135808 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.164403 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402281-4rm7g"]
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.266186 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdfnr\" (UniqueName: \"kubernetes.io/projected/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-kube-api-access-bdfnr\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.266334 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-fernet-keys\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.266469 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-config-data\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.266544 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-combined-ca-bundle\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.368349 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-config-data\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.368578 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-combined-ca-bundle\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.368716 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdfnr\" (UniqueName: \"kubernetes.io/projected/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-kube-api-access-bdfnr\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.368839 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-fernet-keys\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.377469 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-combined-ca-bundle\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.378665 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-config-data\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.379267 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-fernet-keys\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.389155 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdfnr\" (UniqueName: \"kubernetes.io/projected/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-kube-api-access-bdfnr\") pod \"keystone-cron-29402281-4rm7g\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.459562 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402281-4rm7g"
Nov 26 06:01:00 crc kubenswrapper[4871]: W1126 06:01:00.924465 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3af4cf7b_408a_44b2_a5b3_2919f8f8ee68.slice/crio-7f6be481f23b135e2520bc43aa2898075d36596114ea34ca52e54ea9e63f8688 WatchSource:0}: Error finding container 7f6be481f23b135e2520bc43aa2898075d36596114ea34ca52e54ea9e63f8688: Status 404 returned error can't find the container with id 7f6be481f23b135e2520bc43aa2898075d36596114ea34ca52e54ea9e63f8688
Nov 26 06:01:00 crc kubenswrapper[4871]: I1126 06:01:00.932285 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402281-4rm7g"]
Nov 26 06:01:01 crc kubenswrapper[4871]: I1126 06:01:01.535176 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:01:01 crc kubenswrapper[4871]: I1126 06:01:01.535516 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:01:01 crc kubenswrapper[4871]: I1126 06:01:01.592619 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-72vst"
Nov 26 06:01:01 crc kubenswrapper[4871]: I1126 06:01:01.602732 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402281-4rm7g" event={"ID":"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68","Type":"ContainerStarted","Data":"2c7d056ef000bad7541ad488869dea1c06dc6a15524d69713df0803b158798ad"}
Nov 26 06:01:01 crc kubenswrapper[4871]: I1126 06:01:01.602781 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402281-4rm7g" event={"ID":"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68","Type":"ContainerStarted","Data":"7f6be481f23b135e2520bc43aa2898075d36596114ea34ca52e54ea9e63f8688"}
event={"ID":"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68","Type":"ContainerStarted","Data":"7f6be481f23b135e2520bc43aa2898075d36596114ea34ca52e54ea9e63f8688"} Nov 26 06:01:01 crc kubenswrapper[4871]: I1126 06:01:01.642924 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29402281-4rm7g" podStartSLOduration=1.64290727 podStartE2EDuration="1.64290727s" podCreationTimestamp="2025-11-26 06:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:01:01.637386633 +0000 UTC m=+2119.820438229" watchObservedRunningTime="2025-11-26 06:01:01.64290727 +0000 UTC m=+2119.825958856" Nov 26 06:01:01 crc kubenswrapper[4871]: I1126 06:01:01.663502 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-72vst" Nov 26 06:01:01 crc kubenswrapper[4871]: I1126 06:01:01.837674 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-72vst"] Nov 26 06:01:03 crc kubenswrapper[4871]: I1126 06:01:03.625824 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-72vst" podUID="111dc800-8903-483c-8420-322a90bf27e1" containerName="registry-server" containerID="cri-o://be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172" gracePeriod=2 Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.203421 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-72vst" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.254655 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-catalog-content\") pod \"111dc800-8903-483c-8420-322a90bf27e1\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.254744 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-utilities\") pod \"111dc800-8903-483c-8420-322a90bf27e1\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.254836 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxlgq\" (UniqueName: \"kubernetes.io/projected/111dc800-8903-483c-8420-322a90bf27e1-kube-api-access-nxlgq\") pod \"111dc800-8903-483c-8420-322a90bf27e1\" (UID: \"111dc800-8903-483c-8420-322a90bf27e1\") " Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.255856 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-utilities" (OuterVolumeSpecName: "utilities") pod "111dc800-8903-483c-8420-322a90bf27e1" (UID: "111dc800-8903-483c-8420-322a90bf27e1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.256211 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.280282 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/111dc800-8903-483c-8420-322a90bf27e1-kube-api-access-nxlgq" (OuterVolumeSpecName: "kube-api-access-nxlgq") pod "111dc800-8903-483c-8420-322a90bf27e1" (UID: "111dc800-8903-483c-8420-322a90bf27e1"). InnerVolumeSpecName "kube-api-access-nxlgq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.331488 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "111dc800-8903-483c-8420-322a90bf27e1" (UID: "111dc800-8903-483c-8420-322a90bf27e1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.358001 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/111dc800-8903-483c-8420-322a90bf27e1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.358035 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxlgq\" (UniqueName: \"kubernetes.io/projected/111dc800-8903-483c-8420-322a90bf27e1-kube-api-access-nxlgq\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.635690 4871 generic.go:334] "Generic (PLEG): container finished" podID="3af4cf7b-408a-44b2-a5b3-2919f8f8ee68" containerID="2c7d056ef000bad7541ad488869dea1c06dc6a15524d69713df0803b158798ad" exitCode=0 Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.635786 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402281-4rm7g" event={"ID":"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68","Type":"ContainerDied","Data":"2c7d056ef000bad7541ad488869dea1c06dc6a15524d69713df0803b158798ad"} Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.639676 4871 generic.go:334] "Generic (PLEG): container finished" podID="111dc800-8903-483c-8420-322a90bf27e1" containerID="be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172" exitCode=0 Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.639707 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-72vst" event={"ID":"111dc800-8903-483c-8420-322a90bf27e1","Type":"ContainerDied","Data":"be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172"} Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.639722 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-72vst" event={"ID":"111dc800-8903-483c-8420-322a90bf27e1","Type":"ContainerDied","Data":"5bb8f959dfbcb38f0ddb35ab93790d741a2737292d86e0e91ce78f2843da9349"} Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.639740 4871 scope.go:117] "RemoveContainer" containerID="be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.639762 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-72vst" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.665897 4871 scope.go:117] "RemoveContainer" containerID="ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.685927 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-72vst"] Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.691645 4871 scope.go:117] "RemoveContainer" containerID="268ea80b44049943c7e59be50191e2d0cf6a1847b715f160b81ca6da82b9de94" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.707051 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-72vst"] Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.736357 4871 scope.go:117] "RemoveContainer" containerID="be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172" Nov 26 06:01:04 crc kubenswrapper[4871]: E1126 06:01:04.736940 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172\": container with ID starting with be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172 not found: ID does not exist" containerID="be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.736986 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172"} err="failed to get container status \"be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172\": rpc error: code = NotFound desc = could not find container \"be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172\": container with ID starting with be2914f6c7d6781a202742d7ce8cf49550e5c5ef1f0e46e11618820e6c2f1172 not found: ID does not exist" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.737013 4871 scope.go:117] "RemoveContainer" containerID="ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323" Nov 26 06:01:04 crc kubenswrapper[4871]: E1126 06:01:04.737620 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323\": container with ID starting with ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323 not found: ID does not exist" containerID="ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.737660 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323"} err="failed to get container status \"ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323\": rpc error: code = NotFound desc = could not find container \"ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323\": container with ID starting with ce6d2be364dad152e03480643a047a517cb8b0755a3593cad6ac8de93d471323 not found: ID does not exist" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.737687 4871 scope.go:117] "RemoveContainer" containerID="268ea80b44049943c7e59be50191e2d0cf6a1847b715f160b81ca6da82b9de94" Nov 26 06:01:04 crc kubenswrapper[4871]: E1126 06:01:04.737981 4871 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"268ea80b44049943c7e59be50191e2d0cf6a1847b715f160b81ca6da82b9de94\": container with ID starting with 268ea80b44049943c7e59be50191e2d0cf6a1847b715f160b81ca6da82b9de94 not found: ID does not exist" containerID="268ea80b44049943c7e59be50191e2d0cf6a1847b715f160b81ca6da82b9de94" Nov 26 06:01:04 crc kubenswrapper[4871]: I1126 06:01:04.738032 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"268ea80b44049943c7e59be50191e2d0cf6a1847b715f160b81ca6da82b9de94"} err="failed to get container status \"268ea80b44049943c7e59be50191e2d0cf6a1847b715f160b81ca6da82b9de94\": rpc error: code = NotFound desc = could not find container \"268ea80b44049943c7e59be50191e2d0cf6a1847b715f160b81ca6da82b9de94\": container with ID starting with 268ea80b44049943c7e59be50191e2d0cf6a1847b715f160b81ca6da82b9de94 not found: ID does not exist" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.032624 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402281-4rm7g" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.090491 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-combined-ca-bundle\") pod \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.090691 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-fernet-keys\") pod \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.090773 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-config-data\") pod \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.090805 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdfnr\" (UniqueName: \"kubernetes.io/projected/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-kube-api-access-bdfnr\") pod \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\" (UID: \"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68\") " Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.114262 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-kube-api-access-bdfnr" (OuterVolumeSpecName: "kube-api-access-bdfnr") pod "3af4cf7b-408a-44b2-a5b3-2919f8f8ee68" (UID: "3af4cf7b-408a-44b2-a5b3-2919f8f8ee68"). InnerVolumeSpecName "kube-api-access-bdfnr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.115053 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3af4cf7b-408a-44b2-a5b3-2919f8f8ee68" (UID: "3af4cf7b-408a-44b2-a5b3-2919f8f8ee68"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.141565 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3af4cf7b-408a-44b2-a5b3-2919f8f8ee68" (UID: "3af4cf7b-408a-44b2-a5b3-2919f8f8ee68"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.161197 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-config-data" (OuterVolumeSpecName: "config-data") pod "3af4cf7b-408a-44b2-a5b3-2919f8f8ee68" (UID: "3af4cf7b-408a-44b2-a5b3-2919f8f8ee68"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.193237 4871 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.193285 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.193298 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdfnr\" (UniqueName: \"kubernetes.io/projected/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-kube-api-access-bdfnr\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.193313 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3af4cf7b-408a-44b2-a5b3-2919f8f8ee68-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.521785 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="111dc800-8903-483c-8420-322a90bf27e1" path="/var/lib/kubelet/pods/111dc800-8903-483c-8420-322a90bf27e1/volumes" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.663574 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402281-4rm7g" event={"ID":"3af4cf7b-408a-44b2-a5b3-2919f8f8ee68","Type":"ContainerDied","Data":"7f6be481f23b135e2520bc43aa2898075d36596114ea34ca52e54ea9e63f8688"} Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.663620 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f6be481f23b135e2520bc43aa2898075d36596114ea34ca52e54ea9e63f8688" Nov 26 06:01:06 crc kubenswrapper[4871]: I1126 06:01:06.663674 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29402281-4rm7g" Nov 26 06:01:53 crc kubenswrapper[4871]: I1126 06:01:53.614802 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:01:53 crc kubenswrapper[4871]: I1126 06:01:53.615511 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:01:55 crc kubenswrapper[4871]: I1126 06:01:55.239292 4871 generic.go:334] "Generic (PLEG): container finished" podID="8d747185-1d52-4102-be05-7f18ff179f3a" containerID="ee6f51c7e5ab819bb5c3f1c91b4cb9c634d5351ee9f8f8bbb9ce07ede5684dad" exitCode=0 Nov 26 06:01:55 crc kubenswrapper[4871]: I1126 06:01:55.239408 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" event={"ID":"8d747185-1d52-4102-be05-7f18ff179f3a","Type":"ContainerDied","Data":"ee6f51c7e5ab819bb5c3f1c91b4cb9c634d5351ee9f8f8bbb9ce07ede5684dad"} Nov 26 06:01:56 crc kubenswrapper[4871]: I1126 06:01:56.728357 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:01:56 crc kubenswrapper[4871]: I1126 06:01:56.928480 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-inventory\") pod \"8d747185-1d52-4102-be05-7f18ff179f3a\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " Nov 26 06:01:56 crc kubenswrapper[4871]: I1126 06:01:56.928724 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2776\" (UniqueName: \"kubernetes.io/projected/8d747185-1d52-4102-be05-7f18ff179f3a-kube-api-access-j2776\") pod \"8d747185-1d52-4102-be05-7f18ff179f3a\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " Nov 26 06:01:56 crc kubenswrapper[4871]: I1126 06:01:56.928847 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/8d747185-1d52-4102-be05-7f18ff179f3a-ovncontroller-config-0\") pod \"8d747185-1d52-4102-be05-7f18ff179f3a\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " Nov 26 06:01:56 crc kubenswrapper[4871]: I1126 06:01:56.928918 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ovn-combined-ca-bundle\") pod \"8d747185-1d52-4102-be05-7f18ff179f3a\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " Nov 26 06:01:56 crc kubenswrapper[4871]: I1126 06:01:56.928970 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ssh-key\") pod \"8d747185-1d52-4102-be05-7f18ff179f3a\" (UID: \"8d747185-1d52-4102-be05-7f18ff179f3a\") " Nov 26 06:01:56 crc kubenswrapper[4871]: I1126 06:01:56.934892 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "8d747185-1d52-4102-be05-7f18ff179f3a" (UID: "8d747185-1d52-4102-be05-7f18ff179f3a"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:01:56 crc kubenswrapper[4871]: I1126 06:01:56.936645 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d747185-1d52-4102-be05-7f18ff179f3a-kube-api-access-j2776" (OuterVolumeSpecName: "kube-api-access-j2776") pod "8d747185-1d52-4102-be05-7f18ff179f3a" (UID: "8d747185-1d52-4102-be05-7f18ff179f3a"). InnerVolumeSpecName "kube-api-access-j2776". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:01:56 crc kubenswrapper[4871]: I1126 06:01:56.965026 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d747185-1d52-4102-be05-7f18ff179f3a-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "8d747185-1d52-4102-be05-7f18ff179f3a" (UID: "8d747185-1d52-4102-be05-7f18ff179f3a"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:01:56 crc kubenswrapper[4871]: I1126 06:01:56.988874 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8d747185-1d52-4102-be05-7f18ff179f3a" (UID: "8d747185-1d52-4102-be05-7f18ff179f3a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:01:56 crc kubenswrapper[4871]: I1126 06:01:56.992384 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-inventory" (OuterVolumeSpecName: "inventory") pod "8d747185-1d52-4102-be05-7f18ff179f3a" (UID: "8d747185-1d52-4102-be05-7f18ff179f3a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.031246 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.031279 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2776\" (UniqueName: \"kubernetes.io/projected/8d747185-1d52-4102-be05-7f18ff179f3a-kube-api-access-j2776\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.031297 4871 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/8d747185-1d52-4102-be05-7f18ff179f3a-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.031309 4871 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.031321 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d747185-1d52-4102-be05-7f18ff179f3a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.262190 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" event={"ID":"8d747185-1d52-4102-be05-7f18ff179f3a","Type":"ContainerDied","Data":"b2f990eb94e5db51a9643321ca4c1af2764271a388229449f06da3d65b1cc122"} Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.262232 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2f990eb94e5db51a9643321ca4c1af2764271a388229449f06da3d65b1cc122" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.262271 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-896gl" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.407656 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf"] Nov 26 06:01:57 crc kubenswrapper[4871]: E1126 06:01:57.408205 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d747185-1d52-4102-be05-7f18ff179f3a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.408228 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d747185-1d52-4102-be05-7f18ff179f3a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 26 06:01:57 crc kubenswrapper[4871]: E1126 06:01:57.408258 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="111dc800-8903-483c-8420-322a90bf27e1" containerName="registry-server" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.408267 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="111dc800-8903-483c-8420-322a90bf27e1" containerName="registry-server" Nov 26 06:01:57 crc kubenswrapper[4871]: E1126 06:01:57.408282 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3af4cf7b-408a-44b2-a5b3-2919f8f8ee68" containerName="keystone-cron" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.408292 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="3af4cf7b-408a-44b2-a5b3-2919f8f8ee68" containerName="keystone-cron" Nov 26 06:01:57 crc kubenswrapper[4871]: E1126 06:01:57.408306 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="111dc800-8903-483c-8420-322a90bf27e1" containerName="extract-content" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.408314 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="111dc800-8903-483c-8420-322a90bf27e1" containerName="extract-content" Nov 26 06:01:57 crc kubenswrapper[4871]: E1126 06:01:57.408331 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="111dc800-8903-483c-8420-322a90bf27e1" containerName="extract-utilities" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.408339 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="111dc800-8903-483c-8420-322a90bf27e1" containerName="extract-utilities" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.408624 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d747185-1d52-4102-be05-7f18ff179f3a" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.408637 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="3af4cf7b-408a-44b2-a5b3-2919f8f8ee68" containerName="keystone-cron" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.408659 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="111dc800-8903-483c-8420-322a90bf27e1" containerName="registry-server" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.409597 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.419060 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.419189 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.419282 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.419392 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.419647 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.422750 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf"] Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.424367 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.442637 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.442731 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.442763 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.442872 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.442923 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.442990 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtntd\" (UniqueName: \"kubernetes.io/projected/b6bbc102-0536-4833-8d96-a94360126601-kube-api-access-gtntd\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.545258 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.545429 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.545618 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtntd\" (UniqueName: \"kubernetes.io/projected/b6bbc102-0536-4833-8d96-a94360126601-kube-api-access-gtntd\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.545798 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.545948 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.546003 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.549261 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.549262 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.549801 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.550078 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.551895 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.562290 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtntd\" (UniqueName: \"kubernetes.io/projected/b6bbc102-0536-4833-8d96-a94360126601-kube-api-access-gtntd\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:57 crc kubenswrapper[4871]: I1126 06:01:57.730631 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:01:58 crc kubenswrapper[4871]: I1126 06:01:58.344702 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf"] Nov 26 06:01:59 crc kubenswrapper[4871]: I1126 06:01:59.299872 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" event={"ID":"b6bbc102-0536-4833-8d96-a94360126601","Type":"ContainerStarted","Data":"98a3d5881f04f566dc173be9f22f64054aad86a26aad67273437253fe337c78e"} Nov 26 06:01:59 crc kubenswrapper[4871]: I1126 06:01:59.300322 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" event={"ID":"b6bbc102-0536-4833-8d96-a94360126601","Type":"ContainerStarted","Data":"6eb58fb05a0732e06da58fdd4ba12e957ea5574a2121411730872f3d5a0f103d"} Nov 26 06:01:59 crc kubenswrapper[4871]: I1126 06:01:59.328470 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" podStartSLOduration=1.854925605 podStartE2EDuration="2.328452037s" podCreationTimestamp="2025-11-26 06:01:57 +0000 UTC" firstStartedPulling="2025-11-26 06:01:58.350168548 +0000 UTC m=+2176.533220174" lastFinishedPulling="2025-11-26 06:01:58.82369501 +0000 UTC m=+2177.006746606" observedRunningTime="2025-11-26 06:01:59.32695886 +0000 UTC m=+2177.510010506" watchObservedRunningTime="2025-11-26 06:01:59.328452037 +0000 UTC m=+2177.511503633" Nov 26 06:02:23 crc kubenswrapper[4871]: I1126 06:02:23.615397 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:02:23 crc kubenswrapper[4871]: I1126 06:02:23.616199 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:02:52 crc kubenswrapper[4871]: I1126 06:02:52.953288 4871 generic.go:334] "Generic (PLEG): container finished" podID="b6bbc102-0536-4833-8d96-a94360126601" containerID="98a3d5881f04f566dc173be9f22f64054aad86a26aad67273437253fe337c78e" exitCode=0 Nov 26 06:02:52 crc kubenswrapper[4871]: I1126 06:02:52.953599 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" event={"ID":"b6bbc102-0536-4833-8d96-a94360126601","Type":"ContainerDied","Data":"98a3d5881f04f566dc173be9f22f64054aad86a26aad67273437253fe337c78e"} Nov 26 06:02:53 crc kubenswrapper[4871]: I1126 06:02:53.615469 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:02:53 crc kubenswrapper[4871]: I1126 06:02:53.615853 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" 
podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:02:53 crc kubenswrapper[4871]: I1126 06:02:53.615908 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 06:02:53 crc kubenswrapper[4871]: I1126 06:02:53.616776 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 06:02:53 crc kubenswrapper[4871]: I1126 06:02:53.616849 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0" gracePeriod=600 Nov 26 06:02:53 crc kubenswrapper[4871]: E1126 06:02:53.751600 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:02:53 crc kubenswrapper[4871]: I1126 06:02:53.966467 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0" exitCode=0 Nov 26 06:02:53 crc kubenswrapper[4871]: I1126 06:02:53.966580 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"} Nov 26 06:02:53 crc kubenswrapper[4871]: I1126 06:02:53.966675 4871 scope.go:117] "RemoveContainer" containerID="690714808ddb6f01775f9048b33eb3edf7f436bcc65bc1eb71c8be346f112ef0" Nov 26 06:02:53 crc kubenswrapper[4871]: I1126 06:02:53.967942 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0" Nov 26 06:02:53 crc kubenswrapper[4871]: E1126 06:02:53.968367 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.443047 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.453635 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-inventory\") pod \"b6bbc102-0536-4833-8d96-a94360126601\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.453730 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-ovn-metadata-agent-neutron-config-0\") pod \"b6bbc102-0536-4833-8d96-a94360126601\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.453894 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-ssh-key\") pod \"b6bbc102-0536-4833-8d96-a94360126601\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.454062 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-metadata-combined-ca-bundle\") pod \"b6bbc102-0536-4833-8d96-a94360126601\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.454126 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-nova-metadata-neutron-config-0\") pod \"b6bbc102-0536-4833-8d96-a94360126601\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.454177 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtntd\" (UniqueName: \"kubernetes.io/projected/b6bbc102-0536-4833-8d96-a94360126601-kube-api-access-gtntd\") pod \"b6bbc102-0536-4833-8d96-a94360126601\" (UID: \"b6bbc102-0536-4833-8d96-a94360126601\") " Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.459763 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6bbc102-0536-4833-8d96-a94360126601-kube-api-access-gtntd" (OuterVolumeSpecName: "kube-api-access-gtntd") pod "b6bbc102-0536-4833-8d96-a94360126601" (UID: "b6bbc102-0536-4833-8d96-a94360126601"). InnerVolumeSpecName "kube-api-access-gtntd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.461309 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "b6bbc102-0536-4833-8d96-a94360126601" (UID: "b6bbc102-0536-4833-8d96-a94360126601"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.494479 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-inventory" (OuterVolumeSpecName: "inventory") pod "b6bbc102-0536-4833-8d96-a94360126601" (UID: "b6bbc102-0536-4833-8d96-a94360126601"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.495109 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "b6bbc102-0536-4833-8d96-a94360126601" (UID: "b6bbc102-0536-4833-8d96-a94360126601"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.496246 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "b6bbc102-0536-4833-8d96-a94360126601" (UID: "b6bbc102-0536-4833-8d96-a94360126601"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.497940 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b6bbc102-0536-4833-8d96-a94360126601" (UID: "b6bbc102-0536-4833-8d96-a94360126601"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.558229 4871 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.558286 4871 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.558308 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtntd\" (UniqueName: \"kubernetes.io/projected/b6bbc102-0536-4833-8d96-a94360126601-kube-api-access-gtntd\") on node \"crc\" DevicePath \"\"" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.558352 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.558386 4871 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.558412 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b6bbc102-0536-4833-8d96-a94360126601-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.987331 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" event={"ID":"b6bbc102-0536-4833-8d96-a94360126601","Type":"ContainerDied","Data":"6eb58fb05a0732e06da58fdd4ba12e957ea5574a2121411730872f3d5a0f103d"} Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.987924 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6eb58fb05a0732e06da58fdd4ba12e957ea5574a2121411730872f3d5a0f103d" Nov 26 06:02:54 crc kubenswrapper[4871]: I1126 06:02:54.987425 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.141225 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn"] Nov 26 06:02:55 crc kubenswrapper[4871]: E1126 06:02:55.141745 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6bbc102-0536-4833-8d96-a94360126601" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.141768 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6bbc102-0536-4833-8d96-a94360126601" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.142000 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6bbc102-0536-4833-8d96-a94360126601" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.142861 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.149151 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.156024 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.157066 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.157635 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.175350 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.191284 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn"] Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.308915 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.308979 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.309024 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.309219 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.309310 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgjlz\" (UniqueName: \"kubernetes.io/projected/95ebef76-794b-40b5-bf99-3604b66446f2-kube-api-access-cgjlz\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.411195 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.411295 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgjlz\" (UniqueName: \"kubernetes.io/projected/95ebef76-794b-40b5-bf99-3604b66446f2-kube-api-access-cgjlz\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.411449 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.411491 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.411551 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.417125 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.417211 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.417933 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.418853 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-combined-ca-bundle\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.438851 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgjlz\" (UniqueName: \"kubernetes.io/projected/95ebef76-794b-40b5-bf99-3604b66446f2-kube-api-access-cgjlz\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:55 crc kubenswrapper[4871]: I1126 06:02:55.475891 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" Nov 26 06:02:56 crc kubenswrapper[4871]: I1126 06:02:56.076955 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn"] Nov 26 06:02:56 crc kubenswrapper[4871]: W1126 06:02:56.083825 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95ebef76_794b_40b5_bf99_3604b66446f2.slice/crio-b23fddc4db93692c87598db77fa9569849993f0712a786eb9298ffb4befd515c WatchSource:0}: Error finding container b23fddc4db93692c87598db77fa9569849993f0712a786eb9298ffb4befd515c: Status 404 returned error can't find the container with id b23fddc4db93692c87598db77fa9569849993f0712a786eb9298ffb4befd515c Nov 26 06:02:56 crc kubenswrapper[4871]: I1126 06:02:56.088138 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 06:02:57 crc kubenswrapper[4871]: I1126 06:02:57.007456 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" event={"ID":"95ebef76-794b-40b5-bf99-3604b66446f2","Type":"ContainerStarted","Data":"80d8b091aed097189f96d3b5ae3fc1961e85ce0c7195d12f7f1e93547ee7df62"} Nov 26 06:02:57 crc kubenswrapper[4871]: I1126 06:02:57.007909 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" event={"ID":"95ebef76-794b-40b5-bf99-3604b66446f2","Type":"ContainerStarted","Data":"b23fddc4db93692c87598db77fa9569849993f0712a786eb9298ffb4befd515c"} Nov 26 06:02:57 crc kubenswrapper[4871]: I1126 06:02:57.048227 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" podStartSLOduration=1.578864088 podStartE2EDuration="2.048201256s" podCreationTimestamp="2025-11-26 06:02:55 +0000 UTC" firstStartedPulling="2025-11-26 06:02:56.08738755 +0000 UTC m=+2234.270439146" lastFinishedPulling="2025-11-26 06:02:56.556724718 +0000 UTC m=+2234.739776314" observedRunningTime="2025-11-26 06:02:57.027971594 +0000 UTC m=+2235.211023210" watchObservedRunningTime="2025-11-26 06:02:57.048201256 +0000 UTC m=+2235.231252872" Nov 26 06:03:06 crc kubenswrapper[4871]: I1126 06:03:06.508235 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0" Nov 26 06:03:06 crc kubenswrapper[4871]: E1126 06:03:06.509354 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 26 06:03:21 crc kubenswrapper[4871]: I1126 06:03:21.508074 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:03:21 crc kubenswrapper[4871]: E1126 06:03:21.509126 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:03:36 crc kubenswrapper[4871]: I1126 06:03:36.508234 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:03:36 crc kubenswrapper[4871]: E1126 06:03:36.509345 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:03:49 crc kubenswrapper[4871]: I1126 06:03:49.508018 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:03:49 crc kubenswrapper[4871]: E1126 06:03:49.509220 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:04:02 crc kubenswrapper[4871]: I1126 06:04:02.532218 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:04:02 crc kubenswrapper[4871]: E1126 06:04:02.535245 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:04:13 crc kubenswrapper[4871]: I1126 06:04:13.507371 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:04:13 crc kubenswrapper[4871]: E1126 06:04:13.508408 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:04:24 crc kubenswrapper[4871]: I1126 06:04:24.508893 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:04:24 crc kubenswrapper[4871]: E1126 06:04:24.509496 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:04:38 crc kubenswrapper[4871]: I1126 06:04:38.508341 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:04:38 crc kubenswrapper[4871]: E1126 06:04:38.509918 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:04:50 crc kubenswrapper[4871]: I1126 06:04:50.507605 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:04:50 crc kubenswrapper[4871]: E1126 06:04:50.508548 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:05:05 crc kubenswrapper[4871]: I1126 06:05:05.508179 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:05:05 crc kubenswrapper[4871]: E1126 06:05:05.508983 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:05:20 crc kubenswrapper[4871]: I1126 06:05:20.507279 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:05:20 crc kubenswrapper[4871]: E1126 06:05:20.508194 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:05:34 crc kubenswrapper[4871]: I1126 06:05:34.507390 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:05:34 crc kubenswrapper[4871]: E1126 06:05:34.508187 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:05:47 crc kubenswrapper[4871]: I1126 06:05:47.507043 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:05:47 crc kubenswrapper[4871]: E1126 06:05:47.507940 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:06:01 crc kubenswrapper[4871]: I1126 06:06:01.507058 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:06:01 crc kubenswrapper[4871]: E1126 06:06:01.507923 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:06:12 crc kubenswrapper[4871]: I1126 06:06:12.519031 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:06:12 crc kubenswrapper[4871]: E1126 06:06:12.520090 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:06:24 crc kubenswrapper[4871]: I1126 06:06:24.507899 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:06:24 crc kubenswrapper[4871]: E1126 06:06:24.508918 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:06:39 crc kubenswrapper[4871]: I1126 06:06:39.507422 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:06:39 crc kubenswrapper[4871]: E1126 06:06:39.508491 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:06:51 crc kubenswrapper[4871]: I1126 06:06:51.508635 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0" Nov 26 06:06:51 crc kubenswrapper[4871]: E1126 06:06:51.509825 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:07:03 crc kubenswrapper[4871]: I1126 06:07:03.507241 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0" Nov 26 06:07:03 crc kubenswrapper[4871]: E1126 06:07:03.508515 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:07:14 crc kubenswrapper[4871]: I1126 06:07:14.508448 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0" Nov 26 06:07:14 crc kubenswrapper[4871]: E1126 06:07:14.509644 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:07:20 crc kubenswrapper[4871]: I1126 06:07:20.145337 4871 generic.go:334] "Generic (PLEG): container finished" podID="95ebef76-794b-40b5-bf99-3604b66446f2" containerID="80d8b091aed097189f96d3b5ae3fc1961e85ce0c7195d12f7f1e93547ee7df62" exitCode=0 Nov 26 06:07:20 crc kubenswrapper[4871]: I1126 06:07:20.145419 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" event={"ID":"95ebef76-794b-40b5-bf99-3604b66446f2","Type":"ContainerDied","Data":"80d8b091aed097189f96d3b5ae3fc1961e85ce0c7195d12f7f1e93547ee7df62"} Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.701483 4871 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.875388 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-secret-0\") pod \"95ebef76-794b-40b5-bf99-3604b66446f2\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") "
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.875451 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-combined-ca-bundle\") pod \"95ebef76-794b-40b5-bf99-3604b66446f2\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") "
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.875479 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-ssh-key\") pod \"95ebef76-794b-40b5-bf99-3604b66446f2\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") "
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.875511 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgjlz\" (UniqueName: \"kubernetes.io/projected/95ebef76-794b-40b5-bf99-3604b66446f2-kube-api-access-cgjlz\") pod \"95ebef76-794b-40b5-bf99-3604b66446f2\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") "
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.875577 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-inventory\") pod \"95ebef76-794b-40b5-bf99-3604b66446f2\" (UID: \"95ebef76-794b-40b5-bf99-3604b66446f2\") "
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.881886 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95ebef76-794b-40b5-bf99-3604b66446f2-kube-api-access-cgjlz" (OuterVolumeSpecName: "kube-api-access-cgjlz") pod "95ebef76-794b-40b5-bf99-3604b66446f2" (UID: "95ebef76-794b-40b5-bf99-3604b66446f2"). InnerVolumeSpecName "kube-api-access-cgjlz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.883488 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "95ebef76-794b-40b5-bf99-3604b66446f2" (UID: "95ebef76-794b-40b5-bf99-3604b66446f2"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.918197 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "95ebef76-794b-40b5-bf99-3604b66446f2" (UID: "95ebef76-794b-40b5-bf99-3604b66446f2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.933867 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "95ebef76-794b-40b5-bf99-3604b66446f2" (UID: "95ebef76-794b-40b5-bf99-3604b66446f2"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.937378 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-inventory" (OuterVolumeSpecName: "inventory") pod "95ebef76-794b-40b5-bf99-3604b66446f2" (UID: "95ebef76-794b-40b5-bf99-3604b66446f2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.977840 4871 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-secret-0\") on node \"crc\" DevicePath \"\""
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.977884 4871 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.977899 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.977911 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgjlz\" (UniqueName: \"kubernetes.io/projected/95ebef76-794b-40b5-bf99-3604b66446f2-kube-api-access-cgjlz\") on node \"crc\" DevicePath \"\""
Nov 26 06:07:21 crc kubenswrapper[4871]: I1126 06:07:21.977923 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95ebef76-794b-40b5-bf99-3604b66446f2-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.175302 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn" event={"ID":"95ebef76-794b-40b5-bf99-3604b66446f2","Type":"ContainerDied","Data":"b23fddc4db93692c87598db77fa9569849993f0712a786eb9298ffb4befd515c"}
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.175352 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b23fddc4db93692c87598db77fa9569849993f0712a786eb9298ffb4befd515c"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.175382 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.306632 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"]
Nov 26 06:07:22 crc kubenswrapper[4871]: E1126 06:07:22.307329 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95ebef76-794b-40b5-bf99-3604b66446f2" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.307360 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="95ebef76-794b-40b5-bf99-3604b66446f2" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.307701 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="95ebef76-794b-40b5-bf99-3604b66446f2" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.308650 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.310848 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.311177 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.311351 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.311469 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.311632 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.311982 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.312889 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.320444 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"]
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.499894 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.499990 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.500049 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkjfj\" (UniqueName: \"kubernetes.io/projected/4872fb15-1719-4e77-b0c1-7a2754ff7991-kube-api-access-qkjfj\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.500126 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.500162 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.500402 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.500516 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.500723 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.500890 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.602692 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.602785 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.602874 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.602929 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.602981 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.603025 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkjfj\" (UniqueName: \"kubernetes.io/projected/4872fb15-1719-4e77-b0c1-7a2754ff7991-kube-api-access-qkjfj\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.603066 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.603105 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.603223 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.604817 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.609173 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.609878 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.610258 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.612331 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.613661 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.614044 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.614709 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.626733 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkjfj\" (UniqueName: \"kubernetes.io/projected/4872fb15-1719-4e77-b0c1-7a2754ff7991-kube-api-access-qkjfj\") pod \"nova-edpm-deployment-openstack-edpm-ipam-wnbt7\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:22 crc kubenswrapper[4871]: I1126 06:07:22.628697 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:07:23 crc kubenswrapper[4871]: I1126 06:07:23.222040 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"]
Nov 26 06:07:24 crc kubenswrapper[4871]: I1126 06:07:24.204119 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7" event={"ID":"4872fb15-1719-4e77-b0c1-7a2754ff7991","Type":"ContainerStarted","Data":"2653bd96ae7e62887ddce8d8d1723a643feeaadeefa1bea7a88eb975ad669f7c"}
Nov 26 06:07:24 crc kubenswrapper[4871]: I1126 06:07:24.204609 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7" event={"ID":"4872fb15-1719-4e77-b0c1-7a2754ff7991","Type":"ContainerStarted","Data":"48126beaa4de8a40f2389e33baa87bff0e1444f7f41b86069b625227e584718f"}
Nov 26 06:07:24 crc kubenswrapper[4871]: I1126 06:07:24.234049 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7" podStartSLOduration=1.819689362 podStartE2EDuration="2.234027553s" podCreationTimestamp="2025-11-26 06:07:22 +0000 UTC" firstStartedPulling="2025-11-26 06:07:23.22878586 +0000 UTC m=+2501.411837456" lastFinishedPulling="2025-11-26 06:07:23.643124021 +0000 UTC m=+2501.826175647" observedRunningTime="2025-11-26 06:07:24.233149632 +0000 UTC m=+2502.416201218" watchObservedRunningTime="2025-11-26 06:07:24.234027553 +0000 UTC m=+2502.417079159"
Nov 26 06:07:25 crc kubenswrapper[4871]: I1126 06:07:25.508925 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:07:25 crc kubenswrapper[4871]: E1126 06:07:25.509696 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:07:40 crc kubenswrapper[4871]: I1126 06:07:40.507741 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:07:40 crc kubenswrapper[4871]: E1126 06:07:40.508810 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:07:55 crc kubenswrapper[4871]: I1126 06:07:55.507494 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0"
Nov 26 06:07:56 crc kubenswrapper[4871]: I1126 06:07:56.613547 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"f70aea01c68d7922032007da61abc4f689feb69d32698e40339cf47f34bc06bf"}
Nov 26 06:10:23 crc kubenswrapper[4871]: I1126 06:10:23.615095 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:10:23 crc kubenswrapper[4871]: I1126 06:10:23.615595 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:10:29 crc kubenswrapper[4871]: I1126 06:10:29.313659 4871 generic.go:334] "Generic (PLEG): container finished" podID="4872fb15-1719-4e77-b0c1-7a2754ff7991" containerID="2653bd96ae7e62887ddce8d8d1723a643feeaadeefa1bea7a88eb975ad669f7c" exitCode=0
Nov 26 06:10:29 crc kubenswrapper[4871]: I1126 06:10:29.313745 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7" event={"ID":"4872fb15-1719-4e77-b0c1-7a2754ff7991","Type":"ContainerDied","Data":"2653bd96ae7e62887ddce8d8d1723a643feeaadeefa1bea7a88eb975ad669f7c"}
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.830627 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.916118 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-0\") pod \"4872fb15-1719-4e77-b0c1-7a2754ff7991\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") "
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.916448 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-combined-ca-bundle\") pod \"4872fb15-1719-4e77-b0c1-7a2754ff7991\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") "
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.916615 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-extra-config-0\") pod \"4872fb15-1719-4e77-b0c1-7a2754ff7991\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") "
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.916764 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-ssh-key\") pod \"4872fb15-1719-4e77-b0c1-7a2754ff7991\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") "
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.917005 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-1\") pod \"4872fb15-1719-4e77-b0c1-7a2754ff7991\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") "
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.917118 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-inventory\") pod \"4872fb15-1719-4e77-b0c1-7a2754ff7991\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") "
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.917264 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-1\") pod \"4872fb15-1719-4e77-b0c1-7a2754ff7991\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") "
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.917380 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-0\") pod \"4872fb15-1719-4e77-b0c1-7a2754ff7991\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") "
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.917483 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkjfj\" (UniqueName: \"kubernetes.io/projected/4872fb15-1719-4e77-b0c1-7a2754ff7991-kube-api-access-qkjfj\") pod \"4872fb15-1719-4e77-b0c1-7a2754ff7991\" (UID: \"4872fb15-1719-4e77-b0c1-7a2754ff7991\") "
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.923788 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4872fb15-1719-4e77-b0c1-7a2754ff7991-kube-api-access-qkjfj" (OuterVolumeSpecName: "kube-api-access-qkjfj") pod "4872fb15-1719-4e77-b0c1-7a2754ff7991" (UID: "4872fb15-1719-4e77-b0c1-7a2754ff7991"). InnerVolumeSpecName "kube-api-access-qkjfj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.923787 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "4872fb15-1719-4e77-b0c1-7a2754ff7991" (UID: "4872fb15-1719-4e77-b0c1-7a2754ff7991"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.951133 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "4872fb15-1719-4e77-b0c1-7a2754ff7991" (UID: "4872fb15-1719-4e77-b0c1-7a2754ff7991"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.953562 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-inventory" (OuterVolumeSpecName: "inventory") pod "4872fb15-1719-4e77-b0c1-7a2754ff7991" (UID: "4872fb15-1719-4e77-b0c1-7a2754ff7991"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.955213 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "4872fb15-1719-4e77-b0c1-7a2754ff7991" (UID: "4872fb15-1719-4e77-b0c1-7a2754ff7991"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.955679 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4872fb15-1719-4e77-b0c1-7a2754ff7991" (UID: "4872fb15-1719-4e77-b0c1-7a2754ff7991"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.960993 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "4872fb15-1719-4e77-b0c1-7a2754ff7991" (UID: "4872fb15-1719-4e77-b0c1-7a2754ff7991"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.965501 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "4872fb15-1719-4e77-b0c1-7a2754ff7991" (UID: "4872fb15-1719-4e77-b0c1-7a2754ff7991"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:10:30 crc kubenswrapper[4871]: I1126 06:10:30.973599 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "4872fb15-1719-4e77-b0c1-7a2754ff7991" (UID: "4872fb15-1719-4e77-b0c1-7a2754ff7991"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.020654 4871 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\""
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.020697 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-inventory\") on node \"crc\" DevicePath \"\""
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.020710 4871 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\""
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.020723 4871 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\""
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.020731 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkjfj\" (UniqueName: \"kubernetes.io/projected/4872fb15-1719-4e77-b0c1-7a2754ff7991-kube-api-access-qkjfj\") on node \"crc\" DevicePath \"\""
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.020742 4871 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.020752 4871 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\""
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.020760 4871 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4872fb15-1719-4e77-b0c1-7a2754ff7991-nova-extra-config-0\") on node \"crc\" DevicePath \"\""
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.020767 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4872fb15-1719-4e77-b0c1-7a2754ff7991-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.336378 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7" event={"ID":"4872fb15-1719-4e77-b0c1-7a2754ff7991","Type":"ContainerDied","Data":"48126beaa4de8a40f2389e33baa87bff0e1444f7f41b86069b625227e584718f"}
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.336429 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="48126beaa4de8a40f2389e33baa87bff0e1444f7f41b86069b625227e584718f"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.336456 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-wnbt7"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.445693 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"]
Nov 26 06:10:31 crc kubenswrapper[4871]: E1126 06:10:31.446104 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4872fb15-1719-4e77-b0c1-7a2754ff7991" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.446122 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="4872fb15-1719-4e77-b0c1-7a2754ff7991" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.446330 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="4872fb15-1719-4e77-b0c1-7a2754ff7991" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.447068 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.450201 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.450229 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.450256 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.450740 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-pjzlp"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.452709 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.462517 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"]
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.531617 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.531658 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zk8p\" (UniqueName: \"kubernetes.io/projected/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-kube-api-access-4zk8p\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.531698 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.531735 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.531774 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.531804 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.531873 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.633586 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.633741 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.633795 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zk8p\" (UniqueName: \"kubernetes.io/projected/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-kube-api-access-4zk8p\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.633868 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.633921 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.633979 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.634036 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.645569 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.648110 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.648263 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.649121 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.660054 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.660500 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.672329 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zk8p\" (UniqueName: \"kubernetes.io/projected/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-kube-api-access-4zk8p\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8mffj\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:31 crc kubenswrapper[4871]: I1126 06:10:31.764893 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"
Nov 26 06:10:32 crc kubenswrapper[4871]: I1126 06:10:32.319198 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj"]
Nov 26 06:10:32 crc kubenswrapper[4871]: I1126 06:10:32.328852 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 06:10:32 crc kubenswrapper[4871]: I1126 06:10:32.349417 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj" event={"ID":"bfc1b363-fb5b-4872-bf7f-215dc9c617b5","Type":"ContainerStarted","Data":"0928c300e95bea0372fdc1cd614d4d00f9aa2eac1396316e7491b21a5c642751"}
Nov 26 06:10:33 crc kubenswrapper[4871]: I1126 06:10:33.363130 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj" event={"ID":"bfc1b363-fb5b-4872-bf7f-215dc9c617b5","Type":"ContainerStarted","Data":"6a3d0cde83ea6f9ad339a08bbf79bb4b2c5df5e782972e8a611046d2b3066260"}
Nov 26 06:10:33 crc kubenswrapper[4871]: I1126 06:10:33.398518 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj" podStartSLOduration=1.96152711 podStartE2EDuration="2.398483689s" podCreationTimestamp="2025-11-26 06:10:31 +0000 UTC" firstStartedPulling="2025-11-26 06:10:32.328316228 +0000 UTC m=+2690.511367824" lastFinishedPulling="2025-11-26 06:10:32.765272807 +0000 UTC m=+2690.948324403" observedRunningTime="2025-11-26 06:10:33.388294677 +0000 UTC m=+2691.571346263" watchObservedRunningTime="2025-11-26 06:10:33.398483689 +0000 UTC m=+2691.581535305"
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.558597 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ghwkc"]
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.561517 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.572776 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ghwkc"]
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.660314 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-utilities\") pod \"redhat-marketplace-ghwkc\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") " pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.660358 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhv67\" (UniqueName: \"kubernetes.io/projected/c10b0a33-7311-4c46-9722-d44d42f00b74-kube-api-access-zhv67\") pod \"redhat-marketplace-ghwkc\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") " pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.660422 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-catalog-content\") pod \"redhat-marketplace-ghwkc\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") " pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.762514 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-utilities\") pod \"redhat-marketplace-ghwkc\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") " pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.762611 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhv67\" (UniqueName: \"kubernetes.io/projected/c10b0a33-7311-4c46-9722-d44d42f00b74-kube-api-access-zhv67\") pod \"redhat-marketplace-ghwkc\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") " pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.762660 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-catalog-content\") pod \"redhat-marketplace-ghwkc\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") " pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.763274 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-utilities\") pod \"redhat-marketplace-ghwkc\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") " pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.763285 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-catalog-content\") pod \"redhat-marketplace-ghwkc\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") " pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.790304 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhv67\" (UniqueName: \"kubernetes.io/projected/c10b0a33-7311-4c46-9722-d44d42f00b74-kube-api-access-zhv67\") pod \"redhat-marketplace-ghwkc\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") " pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:10:50 crc kubenswrapper[4871]: I1126 06:10:50.895572 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:10:51 crc kubenswrapper[4871]: I1126 06:10:51.382410 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ghwkc"]
Nov 26 06:10:51 crc kubenswrapper[4871]: W1126 06:10:51.391335 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc10b0a33_7311_4c46_9722_d44d42f00b74.slice/crio-bcf528b194c7cb7a7dbac527e50528677262aa6917e3c0b23464b4989a300296 WatchSource:0}: Error finding container bcf528b194c7cb7a7dbac527e50528677262aa6917e3c0b23464b4989a300296: Status 404 returned error can't find the container with id bcf528b194c7cb7a7dbac527e50528677262aa6917e3c0b23464b4989a300296
Nov 26 06:10:51 crc kubenswrapper[4871]: I1126 06:10:51.578983 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghwkc" event={"ID":"c10b0a33-7311-4c46-9722-d44d42f00b74","Type":"ContainerStarted","Data":"bcf528b194c7cb7a7dbac527e50528677262aa6917e3c0b23464b4989a300296"}
Nov 26 06:10:52 crc kubenswrapper[4871]: I1126 06:10:52.590442 4871 generic.go:334] "Generic (PLEG): container finished" podID="c10b0a33-7311-4c46-9722-d44d42f00b74" containerID="ebf7de1e87b981562f64693a3d5fe1021a7bc0b0ed7067ae5950995d1647968c" exitCode=0
Nov 26 06:10:52 crc kubenswrapper[4871]: I1126 06:10:52.590488 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghwkc" event={"ID":"c10b0a33-7311-4c46-9722-d44d42f00b74","Type":"ContainerDied","Data":"ebf7de1e87b981562f64693a3d5fe1021a7bc0b0ed7067ae5950995d1647968c"}
Nov 26 06:10:53 crc kubenswrapper[4871]: I1126 06:10:53.615137 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:10:53 crc kubenswrapper[4871]: I1126 06:10:53.615611 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:10:54 crc kubenswrapper[4871]: I1126 06:10:54.613137 4871 generic.go:334] "Generic (PLEG): container finished" podID="c10b0a33-7311-4c46-9722-d44d42f00b74" containerID="95261e02bb9b99ae524bdb3baaf3bc09e4e6edfc54bf08b3f43ede79d45a84fd" exitCode=0
Nov 26 06:10:54 crc kubenswrapper[4871]: I1126 06:10:54.613466 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghwkc" event={"ID":"c10b0a33-7311-4c46-9722-d44d42f00b74","Type":"ContainerDied","Data":"95261e02bb9b99ae524bdb3baaf3bc09e4e6edfc54bf08b3f43ede79d45a84fd"}
Nov 26 06:10:55 crc kubenswrapper[4871]: I1126 06:10:55.626126 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghwkc" event={"ID":"c10b0a33-7311-4c46-9722-d44d42f00b74","Type":"ContainerStarted","Data":"970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a"}
Nov 26 06:10:55 crc kubenswrapper[4871]: I1126 06:10:55.671225 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ghwkc" podStartSLOduration=3.186831271 podStartE2EDuration="5.6712064s" podCreationTimestamp="2025-11-26 06:10:50 +0000 UTC" firstStartedPulling="2025-11-26 06:10:52.592114793 +0000 UTC m=+2710.775166389" lastFinishedPulling="2025-11-26 06:10:55.076489922 +0000 UTC m=+2713.259541518" observedRunningTime="2025-11-26 06:10:55.661777777 +0000 UTC m=+2713.844829383" watchObservedRunningTime="2025-11-26 06:10:55.6712064 +0000 UTC m=+2713.854257986"
Nov 26 06:11:00 crc kubenswrapper[4871]: I1126 06:11:00.895949 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:11:00 crc kubenswrapper[4871]: I1126 06:11:00.896588 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:11:00 crc kubenswrapper[4871]: I1126 06:11:00.978990 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:11:01 crc kubenswrapper[4871]: I1126 06:11:01.752985 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:11:01 crc kubenswrapper[4871]: I1126 06:11:01.806724 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ghwkc"]
Nov 26 06:11:03 crc kubenswrapper[4871]: I1126 06:11:03.720102 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ghwkc" podUID="c10b0a33-7311-4c46-9722-d44d42f00b74" containerName="registry-server" containerID="cri-o://970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a" gracePeriod=2
Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.158136 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ghwkc"
Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.344035 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-utilities\") pod \"c10b0a33-7311-4c46-9722-d44d42f00b74\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") "
Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.344088 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-catalog-content\") pod \"c10b0a33-7311-4c46-9722-d44d42f00b74\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") "
Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.344145 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhv67\" (UniqueName: \"kubernetes.io/projected/c10b0a33-7311-4c46-9722-d44d42f00b74-kube-api-access-zhv67\") pod \"c10b0a33-7311-4c46-9722-d44d42f00b74\" (UID: \"c10b0a33-7311-4c46-9722-d44d42f00b74\") "
Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.344957 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-utilities" (OuterVolumeSpecName: "utilities") pod "c10b0a33-7311-4c46-9722-d44d42f00b74" (UID: "c10b0a33-7311-4c46-9722-d44d42f00b74"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.354692 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c10b0a33-7311-4c46-9722-d44d42f00b74-kube-api-access-zhv67" (OuterVolumeSpecName: "kube-api-access-zhv67") pod "c10b0a33-7311-4c46-9722-d44d42f00b74" (UID: "c10b0a33-7311-4c46-9722-d44d42f00b74"). InnerVolumeSpecName "kube-api-access-zhv67". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.361315 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c10b0a33-7311-4c46-9722-d44d42f00b74" (UID: "c10b0a33-7311-4c46-9722-d44d42f00b74"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.446242 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.446279 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c10b0a33-7311-4c46-9722-d44d42f00b74-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.446290 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhv67\" (UniqueName: \"kubernetes.io/projected/c10b0a33-7311-4c46-9722-d44d42f00b74-kube-api-access-zhv67\") on node \"crc\" DevicePath \"\"" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.742743 4871 generic.go:334] "Generic (PLEG): container finished" podID="c10b0a33-7311-4c46-9722-d44d42f00b74" containerID="970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a" exitCode=0 Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.742810 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ghwkc" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.742815 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghwkc" event={"ID":"c10b0a33-7311-4c46-9722-d44d42f00b74","Type":"ContainerDied","Data":"970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a"} Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.744085 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ghwkc" event={"ID":"c10b0a33-7311-4c46-9722-d44d42f00b74","Type":"ContainerDied","Data":"bcf528b194c7cb7a7dbac527e50528677262aa6917e3c0b23464b4989a300296"} Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.744123 4871 scope.go:117] "RemoveContainer" containerID="970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.771374 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ghwkc"] Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.786930 4871 scope.go:117] "RemoveContainer" containerID="95261e02bb9b99ae524bdb3baaf3bc09e4e6edfc54bf08b3f43ede79d45a84fd" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.789730 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ghwkc"] Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.818107 4871 scope.go:117] "RemoveContainer" containerID="ebf7de1e87b981562f64693a3d5fe1021a7bc0b0ed7067ae5950995d1647968c" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.864951 4871 scope.go:117] "RemoveContainer" containerID="970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a" Nov 26 06:11:04 crc kubenswrapper[4871]: E1126 06:11:04.865341 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a\": container with ID starting with 970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a not found: ID does not exist" containerID="970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.865382 4871 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a"} err="failed to get container status \"970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a\": rpc error: code = NotFound desc = could not find container \"970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a\": container with ID starting with 970b4bf7dc340c03d2a8fb03a975e5115c3c3a6db8bd876a1c47208dcea0e67a not found: ID does not exist" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.865407 4871 scope.go:117] "RemoveContainer" containerID="95261e02bb9b99ae524bdb3baaf3bc09e4e6edfc54bf08b3f43ede79d45a84fd" Nov 26 06:11:04 crc kubenswrapper[4871]: E1126 06:11:04.865887 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95261e02bb9b99ae524bdb3baaf3bc09e4e6edfc54bf08b3f43ede79d45a84fd\": container with ID starting with 95261e02bb9b99ae524bdb3baaf3bc09e4e6edfc54bf08b3f43ede79d45a84fd not found: ID does not exist" containerID="95261e02bb9b99ae524bdb3baaf3bc09e4e6edfc54bf08b3f43ede79d45a84fd" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.865915 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95261e02bb9b99ae524bdb3baaf3bc09e4e6edfc54bf08b3f43ede79d45a84fd"} err="failed to get container status \"95261e02bb9b99ae524bdb3baaf3bc09e4e6edfc54bf08b3f43ede79d45a84fd\": rpc error: code = NotFound desc = could not find container \"95261e02bb9b99ae524bdb3baaf3bc09e4e6edfc54bf08b3f43ede79d45a84fd\": container with ID starting with 95261e02bb9b99ae524bdb3baaf3bc09e4e6edfc54bf08b3f43ede79d45a84fd not found: ID does not exist" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.865933 4871 scope.go:117] "RemoveContainer" containerID="ebf7de1e87b981562f64693a3d5fe1021a7bc0b0ed7067ae5950995d1647968c" Nov 26 06:11:04 crc kubenswrapper[4871]: E1126 06:11:04.866174 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebf7de1e87b981562f64693a3d5fe1021a7bc0b0ed7067ae5950995d1647968c\": container with ID starting with ebf7de1e87b981562f64693a3d5fe1021a7bc0b0ed7067ae5950995d1647968c not found: ID does not exist" containerID="ebf7de1e87b981562f64693a3d5fe1021a7bc0b0ed7067ae5950995d1647968c" Nov 26 06:11:04 crc kubenswrapper[4871]: I1126 06:11:04.866218 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebf7de1e87b981562f64693a3d5fe1021a7bc0b0ed7067ae5950995d1647968c"} err="failed to get container status \"ebf7de1e87b981562f64693a3d5fe1021a7bc0b0ed7067ae5950995d1647968c\": rpc error: code = NotFound desc = could not find container \"ebf7de1e87b981562f64693a3d5fe1021a7bc0b0ed7067ae5950995d1647968c\": container with ID starting with ebf7de1e87b981562f64693a3d5fe1021a7bc0b0ed7067ae5950995d1647968c not found: ID does not exist" Nov 26 06:11:06 crc kubenswrapper[4871]: I1126 06:11:06.522823 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c10b0a33-7311-4c46-9722-d44d42f00b74" path="/var/lib/kubelet/pods/c10b0a33-7311-4c46-9722-d44d42f00b74/volumes" Nov 26 06:11:23 crc kubenswrapper[4871]: I1126 06:11:23.615419 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:11:23 crc kubenswrapper[4871]: I1126 06:11:23.616039 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:11:23 crc kubenswrapper[4871]: I1126 06:11:23.616099 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 06:11:23 crc kubenswrapper[4871]: I1126 06:11:23.617136 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f70aea01c68d7922032007da61abc4f689feb69d32698e40339cf47f34bc06bf"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 06:11:23 crc kubenswrapper[4871]: I1126 06:11:23.617218 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://f70aea01c68d7922032007da61abc4f689feb69d32698e40339cf47f34bc06bf" gracePeriod=600 Nov 26 06:11:23 crc kubenswrapper[4871]: I1126 06:11:23.966238 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="f70aea01c68d7922032007da61abc4f689feb69d32698e40339cf47f34bc06bf" exitCode=0 Nov 26 06:11:23 crc kubenswrapper[4871]: I1126 06:11:23.966321 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"f70aea01c68d7922032007da61abc4f689feb69d32698e40339cf47f34bc06bf"} Nov 26 06:11:23 crc kubenswrapper[4871]: I1126 06:11:23.966688 4871 scope.go:117] "RemoveContainer" containerID="5748aaf4303b838e6c75c175fe14917fd5dde5559161f2a568f22b605d2df5f0" Nov 26 06:11:24 crc kubenswrapper[4871]: I1126 06:11:24.983052 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f"} Nov 26 06:11:27 crc kubenswrapper[4871]: I1126 06:11:27.953769 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-s7qkn"] Nov 26 06:11:27 crc kubenswrapper[4871]: E1126 06:11:27.955193 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c10b0a33-7311-4c46-9722-d44d42f00b74" containerName="registry-server" Nov 26 06:11:27 crc kubenswrapper[4871]: I1126 06:11:27.955219 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c10b0a33-7311-4c46-9722-d44d42f00b74" containerName="registry-server" Nov 26 06:11:27 crc kubenswrapper[4871]: E1126 06:11:27.955252 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c10b0a33-7311-4c46-9722-d44d42f00b74" containerName="extract-utilities" Nov 26 06:11:27 crc kubenswrapper[4871]: I1126 06:11:27.955266 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c10b0a33-7311-4c46-9722-d44d42f00b74" 
containerName="extract-utilities" Nov 26 06:11:27 crc kubenswrapper[4871]: E1126 06:11:27.955302 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c10b0a33-7311-4c46-9722-d44d42f00b74" containerName="extract-content" Nov 26 06:11:27 crc kubenswrapper[4871]: I1126 06:11:27.955319 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c10b0a33-7311-4c46-9722-d44d42f00b74" containerName="extract-content" Nov 26 06:11:27 crc kubenswrapper[4871]: I1126 06:11:27.955768 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="c10b0a33-7311-4c46-9722-d44d42f00b74" containerName="registry-server" Nov 26 06:11:27 crc kubenswrapper[4871]: I1126 06:11:27.958338 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:27 crc kubenswrapper[4871]: I1126 06:11:27.969977 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s7qkn"] Nov 26 06:11:28 crc kubenswrapper[4871]: I1126 06:11:28.054224 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l55nk\" (UniqueName: \"kubernetes.io/projected/88cc811c-5779-4c87-8736-52b9635334ce-kube-api-access-l55nk\") pod \"certified-operators-s7qkn\" (UID: \"88cc811c-5779-4c87-8736-52b9635334ce\") " pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:28 crc kubenswrapper[4871]: I1126 06:11:28.054283 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88cc811c-5779-4c87-8736-52b9635334ce-catalog-content\") pod \"certified-operators-s7qkn\" (UID: \"88cc811c-5779-4c87-8736-52b9635334ce\") " pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:28 crc kubenswrapper[4871]: I1126 06:11:28.054420 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88cc811c-5779-4c87-8736-52b9635334ce-utilities\") pod \"certified-operators-s7qkn\" (UID: \"88cc811c-5779-4c87-8736-52b9635334ce\") " pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:28 crc kubenswrapper[4871]: I1126 06:11:28.156050 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l55nk\" (UniqueName: \"kubernetes.io/projected/88cc811c-5779-4c87-8736-52b9635334ce-kube-api-access-l55nk\") pod \"certified-operators-s7qkn\" (UID: \"88cc811c-5779-4c87-8736-52b9635334ce\") " pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:28 crc kubenswrapper[4871]: I1126 06:11:28.156650 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88cc811c-5779-4c87-8736-52b9635334ce-catalog-content\") pod \"certified-operators-s7qkn\" (UID: \"88cc811c-5779-4c87-8736-52b9635334ce\") " pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:28 crc kubenswrapper[4871]: I1126 06:11:28.156753 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88cc811c-5779-4c87-8736-52b9635334ce-utilities\") pod \"certified-operators-s7qkn\" (UID: \"88cc811c-5779-4c87-8736-52b9635334ce\") " pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:28 crc kubenswrapper[4871]: I1126 06:11:28.157233 4871 operation_generator.go:637] "MountVolume.SetUp 
Nov 26 06:11:28 crc kubenswrapper[4871]: I1126 06:11:28.157473 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88cc811c-5779-4c87-8736-52b9635334ce-utilities\") pod \"certified-operators-s7qkn\" (UID: \"88cc811c-5779-4c87-8736-52b9635334ce\") " pod="openshift-marketplace/certified-operators-s7qkn"
Nov 26 06:11:28 crc kubenswrapper[4871]: I1126 06:11:28.183617 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l55nk\" (UniqueName: \"kubernetes.io/projected/88cc811c-5779-4c87-8736-52b9635334ce-kube-api-access-l55nk\") pod \"certified-operators-s7qkn\" (UID: \"88cc811c-5779-4c87-8736-52b9635334ce\") " pod="openshift-marketplace/certified-operators-s7qkn"
Nov 26 06:11:28 crc kubenswrapper[4871]: I1126 06:11:28.294923 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s7qkn"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.603232 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s7qkn"]
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.785500 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nxg7c"]
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.788309 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.821570 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nxg7c"]
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.874713 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkvt9\" (UniqueName: \"kubernetes.io/projected/77cf21d1-ae5f-48c3-ad54-428adeb822a9-kube-api-access-wkvt9\") pod \"community-operators-nxg7c\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.874758 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-catalog-content\") pod \"community-operators-nxg7c\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.874805 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-utilities\") pod \"community-operators-nxg7c\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.977233 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkvt9\" (UniqueName: \"kubernetes.io/projected/77cf21d1-ae5f-48c3-ad54-428adeb822a9-kube-api-access-wkvt9\") pod \"community-operators-nxg7c\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.977290 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-catalog-content\") pod \"community-operators-nxg7c\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.977339 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-utilities\") pod \"community-operators-nxg7c\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.977913 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-utilities\") pod \"community-operators-nxg7c\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.977913 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-catalog-content\") pod \"community-operators-nxg7c\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:28.998269 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkvt9\" (UniqueName: \"kubernetes.io/projected/77cf21d1-ae5f-48c3-ad54-428adeb822a9-kube-api-access-wkvt9\") pod \"community-operators-nxg7c\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:29.043711 4871 generic.go:334] "Generic (PLEG): container finished" podID="88cc811c-5779-4c87-8736-52b9635334ce" containerID="6ea453e459c4d0502fa0154f4cd7b891b48b1397c82da95c2c4cc3c92b7f77ad" exitCode=0
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:29.043755 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s7qkn" event={"ID":"88cc811c-5779-4c87-8736-52b9635334ce","Type":"ContainerDied","Data":"6ea453e459c4d0502fa0154f4cd7b891b48b1397c82da95c2c4cc3c92b7f77ad"}
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:29.043780 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s7qkn" event={"ID":"88cc811c-5779-4c87-8736-52b9635334ce","Type":"ContainerStarted","Data":"fd39bc6457847c44f0dd582c984bc829e6dd422e54fc19ddaff47a392cb040bd"}
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:29.139689 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:29 crc kubenswrapper[4871]: I1126 06:11:29.667240 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nxg7c"]
Nov 26 06:11:29 crc kubenswrapper[4871]: W1126 06:11:29.671029 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77cf21d1_ae5f_48c3_ad54_428adeb822a9.slice/crio-0b458d7a4a7154e6353167c78369476ffc45386fe951b916eca0b08507336652 WatchSource:0}: Error finding container 0b458d7a4a7154e6353167c78369476ffc45386fe951b916eca0b08507336652: Status 404 returned error can't find the container with id 0b458d7a4a7154e6353167c78369476ffc45386fe951b916eca0b08507336652
Nov 26 06:11:30 crc kubenswrapper[4871]: I1126 06:11:30.066159 4871 generic.go:334] "Generic (PLEG): container finished" podID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" containerID="42853a9301d46506134c49cc6f93bb654c48c8ead3c73913ea32a8ee3a9965a3" exitCode=0
Nov 26 06:11:30 crc kubenswrapper[4871]: I1126 06:11:30.066212 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nxg7c" event={"ID":"77cf21d1-ae5f-48c3-ad54-428adeb822a9","Type":"ContainerDied","Data":"42853a9301d46506134c49cc6f93bb654c48c8ead3c73913ea32a8ee3a9965a3"}
Nov 26 06:11:30 crc kubenswrapper[4871]: I1126 06:11:30.066240 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nxg7c" event={"ID":"77cf21d1-ae5f-48c3-ad54-428adeb822a9","Type":"ContainerStarted","Data":"0b458d7a4a7154e6353167c78369476ffc45386fe951b916eca0b08507336652"}
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.077401 4871 generic.go:334] "Generic (PLEG): container finished" podID="88cc811c-5779-4c87-8736-52b9635334ce" containerID="805c020008c3ac6590bb5a3b78afc3ee0190bad9756d29c17e24fddacb03d644" exitCode=0
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.077496 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s7qkn" event={"ID":"88cc811c-5779-4c87-8736-52b9635334ce","Type":"ContainerDied","Data":"805c020008c3ac6590bb5a3b78afc3ee0190bad9756d29c17e24fddacb03d644"}
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.080849 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nxg7c" event={"ID":"77cf21d1-ae5f-48c3-ad54-428adeb822a9","Type":"ContainerStarted","Data":"0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af"}
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.749310 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dtv6h"]
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.752361 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dtv6h"
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.762209 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dtv6h"]
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.835811 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-utilities\") pod \"redhat-operators-dtv6h\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " pod="openshift-marketplace/redhat-operators-dtv6h"
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.835974 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znf6f\" (UniqueName: \"kubernetes.io/projected/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-kube-api-access-znf6f\") pod \"redhat-operators-dtv6h\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " pod="openshift-marketplace/redhat-operators-dtv6h"
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.836024 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-catalog-content\") pod \"redhat-operators-dtv6h\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " pod="openshift-marketplace/redhat-operators-dtv6h"
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.937910 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-utilities\") pod \"redhat-operators-dtv6h\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " pod="openshift-marketplace/redhat-operators-dtv6h"
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.938074 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znf6f\" (UniqueName: \"kubernetes.io/projected/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-kube-api-access-znf6f\") pod \"redhat-operators-dtv6h\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " pod="openshift-marketplace/redhat-operators-dtv6h"
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.938123 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-catalog-content\") pod \"redhat-operators-dtv6h\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " pod="openshift-marketplace/redhat-operators-dtv6h"
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.938690 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-catalog-content\") pod \"redhat-operators-dtv6h\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " pod="openshift-marketplace/redhat-operators-dtv6h"
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.938980 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-utilities\") pod \"redhat-operators-dtv6h\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " pod="openshift-marketplace/redhat-operators-dtv6h"
Nov 26 06:11:31 crc kubenswrapper[4871]: I1126 06:11:31.964129 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znf6f\" (UniqueName: \"kubernetes.io/projected/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-kube-api-access-znf6f\") pod \"redhat-operators-dtv6h\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " pod="openshift-marketplace/redhat-operators-dtv6h"
\"kube-api-access-znf6f\" (UniqueName: \"kubernetes.io/projected/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-kube-api-access-znf6f\") pod \"redhat-operators-dtv6h\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " pod="openshift-marketplace/redhat-operators-dtv6h" Nov 26 06:11:32 crc kubenswrapper[4871]: I1126 06:11:32.079641 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dtv6h" Nov 26 06:11:32 crc kubenswrapper[4871]: I1126 06:11:32.091792 4871 generic.go:334] "Generic (PLEG): container finished" podID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" containerID="0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af" exitCode=0 Nov 26 06:11:32 crc kubenswrapper[4871]: I1126 06:11:32.091855 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nxg7c" event={"ID":"77cf21d1-ae5f-48c3-ad54-428adeb822a9","Type":"ContainerDied","Data":"0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af"} Nov 26 06:11:32 crc kubenswrapper[4871]: I1126 06:11:32.095069 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s7qkn" event={"ID":"88cc811c-5779-4c87-8736-52b9635334ce","Type":"ContainerStarted","Data":"a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa"} Nov 26 06:11:32 crc kubenswrapper[4871]: I1126 06:11:32.146413 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-s7qkn" podStartSLOduration=2.598200242 podStartE2EDuration="5.146393252s" podCreationTimestamp="2025-11-26 06:11:27 +0000 UTC" firstStartedPulling="2025-11-26 06:11:29.045508404 +0000 UTC m=+2747.228559990" lastFinishedPulling="2025-11-26 06:11:31.593701404 +0000 UTC m=+2749.776753000" observedRunningTime="2025-11-26 06:11:32.143701485 +0000 UTC m=+2750.326753071" watchObservedRunningTime="2025-11-26 06:11:32.146393252 +0000 UTC m=+2750.329444838" Nov 26 06:11:32 crc kubenswrapper[4871]: I1126 06:11:32.615097 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dtv6h"] Nov 26 06:11:33 crc kubenswrapper[4871]: I1126 06:11:33.106032 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtv6h" event={"ID":"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42","Type":"ContainerStarted","Data":"a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880"} Nov 26 06:11:33 crc kubenswrapper[4871]: I1126 06:11:33.106073 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtv6h" event={"ID":"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42","Type":"ContainerStarted","Data":"6f2c5ee02f7cddb2921bd8b830b69b95f775afc7dd0515b77f1e6ac520086a5c"} Nov 26 06:11:33 crc kubenswrapper[4871]: I1126 06:11:33.109057 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nxg7c" event={"ID":"77cf21d1-ae5f-48c3-ad54-428adeb822a9","Type":"ContainerStarted","Data":"b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26"} Nov 26 06:11:33 crc kubenswrapper[4871]: I1126 06:11:33.143245 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nxg7c" podStartSLOduration=2.680711537 podStartE2EDuration="5.143220885s" podCreationTimestamp="2025-11-26 06:11:28 +0000 UTC" firstStartedPulling="2025-11-26 06:11:30.068349802 +0000 UTC m=+2748.251401388" lastFinishedPulling="2025-11-26 
06:11:32.53085915 +0000 UTC m=+2750.713910736" observedRunningTime="2025-11-26 06:11:33.141590324 +0000 UTC m=+2751.324641910" watchObservedRunningTime="2025-11-26 06:11:33.143220885 +0000 UTC m=+2751.326272481" Nov 26 06:11:34 crc kubenswrapper[4871]: I1126 06:11:34.122401 4871 generic.go:334] "Generic (PLEG): container finished" podID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerID="a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880" exitCode=0 Nov 26 06:11:34 crc kubenswrapper[4871]: I1126 06:11:34.122560 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtv6h" event={"ID":"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42","Type":"ContainerDied","Data":"a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880"} Nov 26 06:11:35 crc kubenswrapper[4871]: I1126 06:11:35.135688 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtv6h" event={"ID":"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42","Type":"ContainerStarted","Data":"0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81"} Nov 26 06:11:38 crc kubenswrapper[4871]: I1126 06:11:38.295346 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:38 crc kubenswrapper[4871]: I1126 06:11:38.296021 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:38 crc kubenswrapper[4871]: I1126 06:11:38.364679 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:39 crc kubenswrapper[4871]: I1126 06:11:39.141127 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nxg7c" Nov 26 06:11:39 crc kubenswrapper[4871]: I1126 06:11:39.141184 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nxg7c" Nov 26 06:11:39 crc kubenswrapper[4871]: I1126 06:11:39.201967 4871 generic.go:334] "Generic (PLEG): container finished" podID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerID="0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81" exitCode=0 Nov 26 06:11:39 crc kubenswrapper[4871]: I1126 06:11:39.202099 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtv6h" event={"ID":"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42","Type":"ContainerDied","Data":"0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81"} Nov 26 06:11:39 crc kubenswrapper[4871]: I1126 06:11:39.203390 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nxg7c" Nov 26 06:11:39 crc kubenswrapper[4871]: I1126 06:11:39.288278 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:39 crc kubenswrapper[4871]: I1126 06:11:39.291853 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nxg7c" Nov 26 06:11:40 crc kubenswrapper[4871]: I1126 06:11:40.219634 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtv6h" event={"ID":"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42","Type":"ContainerStarted","Data":"d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0"} Nov 26 06:11:40 crc 
kubenswrapper[4871]: I1126 06:11:40.245351 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dtv6h" podStartSLOduration=3.762509685 podStartE2EDuration="9.245323992s" podCreationTimestamp="2025-11-26 06:11:31 +0000 UTC" firstStartedPulling="2025-11-26 06:11:34.125000436 +0000 UTC m=+2752.308052022" lastFinishedPulling="2025-11-26 06:11:39.607814733 +0000 UTC m=+2757.790866329" observedRunningTime="2025-11-26 06:11:40.2363672 +0000 UTC m=+2758.419418826" watchObservedRunningTime="2025-11-26 06:11:40.245323992 +0000 UTC m=+2758.428375608" Nov 26 06:11:41 crc kubenswrapper[4871]: I1126 06:11:41.522634 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nxg7c"] Nov 26 06:11:41 crc kubenswrapper[4871]: I1126 06:11:41.523061 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nxg7c" podUID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" containerName="registry-server" containerID="cri-o://b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26" gracePeriod=2 Nov 26 06:11:41 crc kubenswrapper[4871]: I1126 06:11:41.728450 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s7qkn"] Nov 26 06:11:41 crc kubenswrapper[4871]: I1126 06:11:41.729041 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-s7qkn" podUID="88cc811c-5779-4c87-8736-52b9635334ce" containerName="registry-server" containerID="cri-o://a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa" gracePeriod=2 Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.032654 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nxg7c" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.082206 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dtv6h" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.083442 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dtv6h" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.144891 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-catalog-content\") pod \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.145598 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-utilities\") pod \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.146271 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s7qkn" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.146574 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-utilities" (OuterVolumeSpecName: "utilities") pod "77cf21d1-ae5f-48c3-ad54-428adeb822a9" (UID: "77cf21d1-ae5f-48c3-ad54-428adeb822a9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.147166 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkvt9\" (UniqueName: \"kubernetes.io/projected/77cf21d1-ae5f-48c3-ad54-428adeb822a9-kube-api-access-wkvt9\") pod \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\" (UID: \"77cf21d1-ae5f-48c3-ad54-428adeb822a9\") " Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.149375 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.154279 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77cf21d1-ae5f-48c3-ad54-428adeb822a9-kube-api-access-wkvt9" (OuterVolumeSpecName: "kube-api-access-wkvt9") pod "77cf21d1-ae5f-48c3-ad54-428adeb822a9" (UID: "77cf21d1-ae5f-48c3-ad54-428adeb822a9"). InnerVolumeSpecName "kube-api-access-wkvt9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.197488 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "77cf21d1-ae5f-48c3-ad54-428adeb822a9" (UID: "77cf21d1-ae5f-48c3-ad54-428adeb822a9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.246154 4871 generic.go:334] "Generic (PLEG): container finished" podID="88cc811c-5779-4c87-8736-52b9635334ce" containerID="a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa" exitCode=0 Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.246343 4871 util.go:48] "No ready sandbox for pod can be found. 
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.246718 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s7qkn" event={"ID":"88cc811c-5779-4c87-8736-52b9635334ce","Type":"ContainerDied","Data":"a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa"}
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.246792 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s7qkn" event={"ID":"88cc811c-5779-4c87-8736-52b9635334ce","Type":"ContainerDied","Data":"fd39bc6457847c44f0dd582c984bc829e6dd422e54fc19ddaff47a392cb040bd"}
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.246840 4871 scope.go:117] "RemoveContainer" containerID="a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.251180 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88cc811c-5779-4c87-8736-52b9635334ce-catalog-content\") pod \"88cc811c-5779-4c87-8736-52b9635334ce\" (UID: \"88cc811c-5779-4c87-8736-52b9635334ce\") "
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.251299 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88cc811c-5779-4c87-8736-52b9635334ce-utilities\") pod \"88cc811c-5779-4c87-8736-52b9635334ce\" (UID: \"88cc811c-5779-4c87-8736-52b9635334ce\") "
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.251418 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l55nk\" (UniqueName: \"kubernetes.io/projected/88cc811c-5779-4c87-8736-52b9635334ce-kube-api-access-l55nk\") pod \"88cc811c-5779-4c87-8736-52b9635334ce\" (UID: \"88cc811c-5779-4c87-8736-52b9635334ce\") "
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.251592 4871 generic.go:334] "Generic (PLEG): container finished" podID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" containerID="b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26" exitCode=0
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.251962 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkvt9\" (UniqueName: \"kubernetes.io/projected/77cf21d1-ae5f-48c3-ad54-428adeb822a9-kube-api-access-wkvt9\") on node \"crc\" DevicePath \"\""
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.252015 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nxg7c"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.252148 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77cf21d1-ae5f-48c3-ad54-428adeb822a9-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.252266 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nxg7c" event={"ID":"77cf21d1-ae5f-48c3-ad54-428adeb822a9","Type":"ContainerDied","Data":"b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26"}
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.252289 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nxg7c" event={"ID":"77cf21d1-ae5f-48c3-ad54-428adeb822a9","Type":"ContainerDied","Data":"0b458d7a4a7154e6353167c78369476ffc45386fe951b916eca0b08507336652"}
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.252729 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88cc811c-5779-4c87-8736-52b9635334ce-utilities" (OuterVolumeSpecName: "utilities") pod "88cc811c-5779-4c87-8736-52b9635334ce" (UID: "88cc811c-5779-4c87-8736-52b9635334ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.255176 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88cc811c-5779-4c87-8736-52b9635334ce-kube-api-access-l55nk" (OuterVolumeSpecName: "kube-api-access-l55nk") pod "88cc811c-5779-4c87-8736-52b9635334ce" (UID: "88cc811c-5779-4c87-8736-52b9635334ce"). InnerVolumeSpecName "kube-api-access-l55nk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.272464 4871 scope.go:117] "RemoveContainer" containerID="805c020008c3ac6590bb5a3b78afc3ee0190bad9756d29c17e24fddacb03d644"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.292103 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nxg7c"]
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.302891 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nxg7c"]
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.306028 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88cc811c-5779-4c87-8736-52b9635334ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "88cc811c-5779-4c87-8736-52b9635334ce" (UID: "88cc811c-5779-4c87-8736-52b9635334ce"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.307748 4871 scope.go:117] "RemoveContainer" containerID="6ea453e459c4d0502fa0154f4cd7b891b48b1397c82da95c2c4cc3c92b7f77ad"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.340251 4871 scope.go:117] "RemoveContainer" containerID="a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa"
Nov 26 06:11:42 crc kubenswrapper[4871]: E1126 06:11:42.340737 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa\": container with ID starting with a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa not found: ID does not exist" containerID="a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.340781 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa"} err="failed to get container status \"a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa\": rpc error: code = NotFound desc = could not find container \"a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa\": container with ID starting with a3029a30647d9dd928ac4c24603184e595e3cc4fc1c5107b2ed61b42949458aa not found: ID does not exist"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.340809 4871 scope.go:117] "RemoveContainer" containerID="805c020008c3ac6590bb5a3b78afc3ee0190bad9756d29c17e24fddacb03d644"
Nov 26 06:11:42 crc kubenswrapper[4871]: E1126 06:11:42.341328 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"805c020008c3ac6590bb5a3b78afc3ee0190bad9756d29c17e24fddacb03d644\": container with ID starting with 805c020008c3ac6590bb5a3b78afc3ee0190bad9756d29c17e24fddacb03d644 not found: ID does not exist" containerID="805c020008c3ac6590bb5a3b78afc3ee0190bad9756d29c17e24fddacb03d644"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.341374 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"805c020008c3ac6590bb5a3b78afc3ee0190bad9756d29c17e24fddacb03d644"} err="failed to get container status \"805c020008c3ac6590bb5a3b78afc3ee0190bad9756d29c17e24fddacb03d644\": rpc error: code = NotFound desc = could not find container \"805c020008c3ac6590bb5a3b78afc3ee0190bad9756d29c17e24fddacb03d644\": container with ID starting with 805c020008c3ac6590bb5a3b78afc3ee0190bad9756d29c17e24fddacb03d644 not found: ID does not exist"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.341402 4871 scope.go:117] "RemoveContainer" containerID="6ea453e459c4d0502fa0154f4cd7b891b48b1397c82da95c2c4cc3c92b7f77ad"
Nov 26 06:11:42 crc kubenswrapper[4871]: E1126 06:11:42.341724 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ea453e459c4d0502fa0154f4cd7b891b48b1397c82da95c2c4cc3c92b7f77ad\": container with ID starting with 6ea453e459c4d0502fa0154f4cd7b891b48b1397c82da95c2c4cc3c92b7f77ad not found: ID does not exist" containerID="6ea453e459c4d0502fa0154f4cd7b891b48b1397c82da95c2c4cc3c92b7f77ad"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.341754 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ea453e459c4d0502fa0154f4cd7b891b48b1397c82da95c2c4cc3c92b7f77ad"} err="failed to get container status \"6ea453e459c4d0502fa0154f4cd7b891b48b1397c82da95c2c4cc3c92b7f77ad\": rpc error: code = NotFound desc = could not find container \"6ea453e459c4d0502fa0154f4cd7b891b48b1397c82da95c2c4cc3c92b7f77ad\": container with ID starting with 6ea453e459c4d0502fa0154f4cd7b891b48b1397c82da95c2c4cc3c92b7f77ad not found: ID does not exist"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.341791 4871 scope.go:117] "RemoveContainer" containerID="b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.354125 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88cc811c-5779-4c87-8736-52b9635334ce-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.354157 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88cc811c-5779-4c87-8736-52b9635334ce-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.354170 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l55nk\" (UniqueName: \"kubernetes.io/projected/88cc811c-5779-4c87-8736-52b9635334ce-kube-api-access-l55nk\") on node \"crc\" DevicePath \"\""
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.371335 4871 scope.go:117] "RemoveContainer" containerID="0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.394826 4871 scope.go:117] "RemoveContainer" containerID="42853a9301d46506134c49cc6f93bb654c48c8ead3c73913ea32a8ee3a9965a3"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.413440 4871 scope.go:117] "RemoveContainer" containerID="b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26"
Nov 26 06:11:42 crc kubenswrapper[4871]: E1126 06:11:42.413972 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26\": container with ID starting with b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26 not found: ID does not exist" containerID="b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.414039 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26"} err="failed to get container status \"b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26\": rpc error: code = NotFound desc = could not find container \"b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26\": container with ID starting with b8df4cc7b73374c4e0e7b908d331606f9728753abcd328fda6cc8cc68ea8bb26 not found: ID does not exist"
Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.414086 4871 scope.go:117] "RemoveContainer" containerID="0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af"
Nov 26 06:11:42 crc kubenswrapper[4871]: E1126 06:11:42.414502 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af\": container with ID starting with 0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af not found: ID does not exist" containerID="0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af"
0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af not found: ID does not exist" containerID="0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.414556 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af"} err="failed to get container status \"0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af\": rpc error: code = NotFound desc = could not find container \"0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af\": container with ID starting with 0cc43b86632b879703cf4429088a62b9a8c0d545c2ef6b2c748862ed59f604af not found: ID does not exist" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.414586 4871 scope.go:117] "RemoveContainer" containerID="42853a9301d46506134c49cc6f93bb654c48c8ead3c73913ea32a8ee3a9965a3" Nov 26 06:11:42 crc kubenswrapper[4871]: E1126 06:11:42.415033 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42853a9301d46506134c49cc6f93bb654c48c8ead3c73913ea32a8ee3a9965a3\": container with ID starting with 42853a9301d46506134c49cc6f93bb654c48c8ead3c73913ea32a8ee3a9965a3 not found: ID does not exist" containerID="42853a9301d46506134c49cc6f93bb654c48c8ead3c73913ea32a8ee3a9965a3" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.415058 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42853a9301d46506134c49cc6f93bb654c48c8ead3c73913ea32a8ee3a9965a3"} err="failed to get container status \"42853a9301d46506134c49cc6f93bb654c48c8ead3c73913ea32a8ee3a9965a3\": rpc error: code = NotFound desc = could not find container \"42853a9301d46506134c49cc6f93bb654c48c8ead3c73913ea32a8ee3a9965a3\": container with ID starting with 42853a9301d46506134c49cc6f93bb654c48c8ead3c73913ea32a8ee3a9965a3 not found: ID does not exist" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.526767 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" path="/var/lib/kubelet/pods/77cf21d1-ae5f-48c3-ad54-428adeb822a9/volumes" Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.577186 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s7qkn"] Nov 26 06:11:42 crc kubenswrapper[4871]: I1126 06:11:42.585855 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-s7qkn"] Nov 26 06:11:43 crc kubenswrapper[4871]: I1126 06:11:43.144002 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dtv6h" podUID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerName="registry-server" probeResult="failure" output=< Nov 26 06:11:43 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s Nov 26 06:11:43 crc kubenswrapper[4871]: > Nov 26 06:11:44 crc kubenswrapper[4871]: I1126 06:11:44.519348 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88cc811c-5779-4c87-8736-52b9635334ce" path="/var/lib/kubelet/pods/88cc811c-5779-4c87-8736-52b9635334ce/volumes" Nov 26 06:11:52 crc kubenswrapper[4871]: I1126 06:11:52.162055 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dtv6h" Nov 26 06:11:52 crc kubenswrapper[4871]: I1126 06:11:52.232387 4871 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dtv6h" Nov 26 06:11:52 crc kubenswrapper[4871]: I1126 06:11:52.404539 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dtv6h"] Nov 26 06:11:53 crc kubenswrapper[4871]: I1126 06:11:53.381948 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dtv6h" podUID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerName="registry-server" containerID="cri-o://d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0" gracePeriod=2 Nov 26 06:11:53 crc kubenswrapper[4871]: I1126 06:11:53.978422 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dtv6h" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.128786 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-utilities\") pod \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.129051 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znf6f\" (UniqueName: \"kubernetes.io/projected/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-kube-api-access-znf6f\") pod \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.129171 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-catalog-content\") pod \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\" (UID: \"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42\") " Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.129778 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-utilities" (OuterVolumeSpecName: "utilities") pod "80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" (UID: "80ef13e3-9fdd-465f-bca8-8d2a14e7ef42"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.136708 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-kube-api-access-znf6f" (OuterVolumeSpecName: "kube-api-access-znf6f") pod "80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" (UID: "80ef13e3-9fdd-465f-bca8-8d2a14e7ef42"). InnerVolumeSpecName "kube-api-access-znf6f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.232178 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.232208 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znf6f\" (UniqueName: \"kubernetes.io/projected/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-kube-api-access-znf6f\") on node \"crc\" DevicePath \"\"" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.256549 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" (UID: "80ef13e3-9fdd-465f-bca8-8d2a14e7ef42"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.333688 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.392222 4871 generic.go:334] "Generic (PLEG): container finished" podID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerID="d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0" exitCode=0 Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.392265 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtv6h" event={"ID":"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42","Type":"ContainerDied","Data":"d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0"} Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.392275 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dtv6h" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.392295 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dtv6h" event={"ID":"80ef13e3-9fdd-465f-bca8-8d2a14e7ef42","Type":"ContainerDied","Data":"6f2c5ee02f7cddb2921bd8b830b69b95f775afc7dd0515b77f1e6ac520086a5c"} Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.392313 4871 scope.go:117] "RemoveContainer" containerID="d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.418894 4871 scope.go:117] "RemoveContainer" containerID="0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.432557 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dtv6h"] Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.443827 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dtv6h"] Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.446599 4871 scope.go:117] "RemoveContainer" containerID="a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.491414 4871 scope.go:117] "RemoveContainer" containerID="d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0" Nov 26 06:11:54 crc kubenswrapper[4871]: E1126 06:11:54.491914 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0\": container with ID starting with d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0 not found: ID does not exist" containerID="d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.491949 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0"} err="failed to get container status \"d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0\": rpc error: code = NotFound desc = could not find container \"d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0\": container with ID starting with d154def9bcec3ac0304abe0e4b503ab6d851b1e2a20a29355a00b80404e381a0 not found: ID does not exist" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.491974 4871 scope.go:117] "RemoveContainer" containerID="0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81" Nov 26 06:11:54 crc kubenswrapper[4871]: E1126 06:11:54.492307 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81\": container with ID starting with 0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81 not found: ID does not exist" containerID="0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.492334 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81"} err="failed to get container status \"0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81\": rpc error: code = NotFound desc = could not find container 
\"0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81\": container with ID starting with 0693353c2971c2e44ebe09261a0d84dcc08b82722552782f1974c1e787cb8b81 not found: ID does not exist" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.492349 4871 scope.go:117] "RemoveContainer" containerID="a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880" Nov 26 06:11:54 crc kubenswrapper[4871]: E1126 06:11:54.492576 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880\": container with ID starting with a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880 not found: ID does not exist" containerID="a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.492600 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880"} err="failed to get container status \"a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880\": rpc error: code = NotFound desc = could not find container \"a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880\": container with ID starting with a3bd6a0c820c700e4000cb23a96515b5a4cb5f5c8833975744b64f54b7c7e880 not found: ID does not exist" Nov 26 06:11:54 crc kubenswrapper[4871]: I1126 06:11:54.518010 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" path="/var/lib/kubelet/pods/80ef13e3-9fdd-465f-bca8-8d2a14e7ef42/volumes" Nov 26 06:12:52 crc kubenswrapper[4871]: I1126 06:12:52.115803 4871 generic.go:334] "Generic (PLEG): container finished" podID="bfc1b363-fb5b-4872-bf7f-215dc9c617b5" containerID="6a3d0cde83ea6f9ad339a08bbf79bb4b2c5df5e782972e8a611046d2b3066260" exitCode=0 Nov 26 06:12:52 crc kubenswrapper[4871]: I1126 06:12:52.115867 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj" event={"ID":"bfc1b363-fb5b-4872-bf7f-215dc9c617b5","Type":"ContainerDied","Data":"6a3d0cde83ea6f9ad339a08bbf79bb4b2c5df5e782972e8a611046d2b3066260"} Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.606513 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.748618 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-inventory\") pod \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.748967 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-0\") pod \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.749030 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-1\") pod \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.749151 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-2\") pod \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.749200 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zk8p\" (UniqueName: \"kubernetes.io/projected/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-kube-api-access-4zk8p\") pod \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.749218 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ssh-key\") pod \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.749235 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-telemetry-combined-ca-bundle\") pod \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\" (UID: \"bfc1b363-fb5b-4872-bf7f-215dc9c617b5\") " Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.755470 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-kube-api-access-4zk8p" (OuterVolumeSpecName: "kube-api-access-4zk8p") pod "bfc1b363-fb5b-4872-bf7f-215dc9c617b5" (UID: "bfc1b363-fb5b-4872-bf7f-215dc9c617b5"). InnerVolumeSpecName "kube-api-access-4zk8p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.756165 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "bfc1b363-fb5b-4872-bf7f-215dc9c617b5" (UID: "bfc1b363-fb5b-4872-bf7f-215dc9c617b5"). 
InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.786358 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bfc1b363-fb5b-4872-bf7f-215dc9c617b5" (UID: "bfc1b363-fb5b-4872-bf7f-215dc9c617b5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.788041 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "bfc1b363-fb5b-4872-bf7f-215dc9c617b5" (UID: "bfc1b363-fb5b-4872-bf7f-215dc9c617b5"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.789473 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "bfc1b363-fb5b-4872-bf7f-215dc9c617b5" (UID: "bfc1b363-fb5b-4872-bf7f-215dc9c617b5"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.799777 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-inventory" (OuterVolumeSpecName: "inventory") pod "bfc1b363-fb5b-4872-bf7f-215dc9c617b5" (UID: "bfc1b363-fb5b-4872-bf7f-215dc9c617b5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.800315 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "bfc1b363-fb5b-4872-bf7f-215dc9c617b5" (UID: "bfc1b363-fb5b-4872-bf7f-215dc9c617b5"). InnerVolumeSpecName "ceilometer-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.851250 4871 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.851296 4871 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.851310 4871 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.851326 4871 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.851342 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zk8p\" (UniqueName: \"kubernetes.io/projected/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-kube-api-access-4zk8p\") on node \"crc\" DevicePath \"\"" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.851357 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 06:12:53 crc kubenswrapper[4871]: I1126 06:12:53.851368 4871 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfc1b363-fb5b-4872-bf7f-215dc9c617b5-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:12:54 crc kubenswrapper[4871]: I1126 06:12:54.141291 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj" event={"ID":"bfc1b363-fb5b-4872-bf7f-215dc9c617b5","Type":"ContainerDied","Data":"0928c300e95bea0372fdc1cd614d4d00f9aa2eac1396316e7491b21a5c642751"} Nov 26 06:12:54 crc kubenswrapper[4871]: I1126 06:12:54.141380 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0928c300e95bea0372fdc1cd614d4d00f9aa2eac1396316e7491b21a5c642751" Nov 26 06:12:54 crc kubenswrapper[4871]: I1126 06:12:54.141333 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8mffj" Nov 26 06:13:32 crc kubenswrapper[4871]: I1126 06:13:32.110391 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 06:13:32 crc kubenswrapper[4871]: I1126 06:13:32.111253 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="prometheus" containerID="cri-o://828d8d5299d261ae0bc7ff55dd4131d3f68e2eb13f1abbc97d7c4a29df22da69" gracePeriod=600 Nov 26 06:13:32 crc kubenswrapper[4871]: I1126 06:13:32.111806 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="thanos-sidecar" containerID="cri-o://c91093b08ccb0bf438dac84877b6239ad13e3ef34f3e327645ffe7ea7c5763f8" gracePeriod=600 Nov 26 06:13:32 crc kubenswrapper[4871]: I1126 06:13:32.111890 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="config-reloader" containerID="cri-o://4b244c43246bcc6d93fa6e6ab5c1c8994ef3e0285f170a7812f3854671280764" gracePeriod=600 Nov 26 06:13:32 crc kubenswrapper[4871]: I1126 06:13:32.944241 4871 generic.go:334] "Generic (PLEG): container finished" podID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerID="c91093b08ccb0bf438dac84877b6239ad13e3ef34f3e327645ffe7ea7c5763f8" exitCode=0 Nov 26 06:13:32 crc kubenswrapper[4871]: I1126 06:13:32.944894 4871 generic.go:334] "Generic (PLEG): container finished" podID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerID="4b244c43246bcc6d93fa6e6ab5c1c8994ef3e0285f170a7812f3854671280764" exitCode=0 Nov 26 06:13:32 crc kubenswrapper[4871]: I1126 06:13:32.944905 4871 generic.go:334] "Generic (PLEG): container finished" podID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerID="828d8d5299d261ae0bc7ff55dd4131d3f68e2eb13f1abbc97d7c4a29df22da69" exitCode=0 Nov 26 06:13:32 crc kubenswrapper[4871]: I1126 06:13:32.944407 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a58d8ebe-c4cb-45c2-8529-d1094ee56518","Type":"ContainerDied","Data":"c91093b08ccb0bf438dac84877b6239ad13e3ef34f3e327645ffe7ea7c5763f8"} Nov 26 06:13:32 crc kubenswrapper[4871]: I1126 06:13:32.944946 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a58d8ebe-c4cb-45c2-8529-d1094ee56518","Type":"ContainerDied","Data":"4b244c43246bcc6d93fa6e6ab5c1c8994ef3e0285f170a7812f3854671280764"} Nov 26 06:13:32 crc kubenswrapper[4871]: I1126 06:13:32.944961 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a58d8ebe-c4cb-45c2-8529-d1094ee56518","Type":"ContainerDied","Data":"828d8d5299d261ae0bc7ff55dd4131d3f68e2eb13f1abbc97d7c4a29df22da69"} Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.217394 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.360879 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.360993 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbtnm\" (UniqueName: \"kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-kube-api-access-dbtnm\") pod \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.361041 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-thanos-prometheus-http-client-file\") pod \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.361080 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-tls-assets\") pod \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.361108 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config\") pod \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.361168 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config\") pod \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.361212 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-secret-combined-ca-bundle\") pod \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.361247 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a58d8ebe-c4cb-45c2-8529-d1094ee56518-prometheus-metric-storage-rulefiles-0\") pod \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.361281 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.361318 4871 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.361351 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config-out\") pod \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\" (UID: \"a58d8ebe-c4cb-45c2-8529-d1094ee56518\") " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.362121 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a58d8ebe-c4cb-45c2-8529-d1094ee56518-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "a58d8ebe-c4cb-45c2-8529-d1094ee56518" (UID: "a58d8ebe-c4cb-45c2-8529-d1094ee56518"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.369009 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "a58d8ebe-c4cb-45c2-8529-d1094ee56518" (UID: "a58d8ebe-c4cb-45c2-8529-d1094ee56518"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.369038 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "a58d8ebe-c4cb-45c2-8529-d1094ee56518" (UID: "a58d8ebe-c4cb-45c2-8529-d1094ee56518"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.369886 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "a58d8ebe-c4cb-45c2-8529-d1094ee56518" (UID: "a58d8ebe-c4cb-45c2-8529-d1094ee56518"). InnerVolumeSpecName "secret-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.370717 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "a58d8ebe-c4cb-45c2-8529-d1094ee56518" (UID: "a58d8ebe-c4cb-45c2-8529-d1094ee56518"). InnerVolumeSpecName "tls-assets". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.370796 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "a58d8ebe-c4cb-45c2-8529-d1094ee56518" (UID: "a58d8ebe-c4cb-45c2-8529-d1094ee56518"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.371123 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-kube-api-access-dbtnm" (OuterVolumeSpecName: "kube-api-access-dbtnm") pod "a58d8ebe-c4cb-45c2-8529-d1094ee56518" (UID: "a58d8ebe-c4cb-45c2-8529-d1094ee56518"). InnerVolumeSpecName "kube-api-access-dbtnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.371539 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config" (OuterVolumeSpecName: "config") pod "a58d8ebe-c4cb-45c2-8529-d1094ee56518" (UID: "a58d8ebe-c4cb-45c2-8529-d1094ee56518"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.374635 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config-out" (OuterVolumeSpecName: "config-out") pod "a58d8ebe-c4cb-45c2-8529-d1094ee56518" (UID: "a58d8ebe-c4cb-45c2-8529-d1094ee56518"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.387006 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "a58d8ebe-c4cb-45c2-8529-d1094ee56518" (UID: "a58d8ebe-c4cb-45c2-8529-d1094ee56518"). InnerVolumeSpecName "pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.449508 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config" (OuterVolumeSpecName: "web-config") pod "a58d8ebe-c4cb-45c2-8529-d1094ee56518" (UID: "a58d8ebe-c4cb-45c2-8529-d1094ee56518"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.466106 4871 reconciler_common.go:293] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.466148 4871 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a58d8ebe-c4cb-45c2-8529-d1094ee56518-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.466166 4871 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\"" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.466184 4871 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\"" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.466198 4871 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config-out\") on node \"crc\" DevicePath \"\"" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.466242 4871 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") on node \"crc\" " Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.466259 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbtnm\" (UniqueName: \"kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-kube-api-access-dbtnm\") on node \"crc\" DevicePath \"\"" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.466272 4871 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.466285 4871 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a58d8ebe-c4cb-45c2-8529-d1094ee56518-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.466296 4871 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.466308 4871 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a58d8ebe-c4cb-45c2-8529-d1094ee56518-web-config\") on node \"crc\" DevicePath \"\"" Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.491018 4871 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.491448 4871 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9") on node "crc"
Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.568283 4871 reconciler_common.go:293] "Volume detached for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") on node \"crc\" DevicePath \"\""
Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.953927 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a58d8ebe-c4cb-45c2-8529-d1094ee56518","Type":"ContainerDied","Data":"a91c0dd1437315a9192a3816f1120ec7ab4a29a700016858c01ca509aca45f4e"}
Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.953985 4871 scope.go:117] "RemoveContainer" containerID="c91093b08ccb0bf438dac84877b6239ad13e3ef34f3e327645ffe7ea7c5763f8"
Nov 26 06:13:33 crc kubenswrapper[4871]: I1126 06:13:33.954136 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.002802 4871 scope.go:117] "RemoveContainer" containerID="4b244c43246bcc6d93fa6e6ab5c1c8994ef3e0285f170a7812f3854671280764"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.033676 4871 scope.go:117] "RemoveContainer" containerID="828d8d5299d261ae0bc7ff55dd4131d3f68e2eb13f1abbc97d7c4a29df22da69"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.051269 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.067588 4871 scope.go:117] "RemoveContainer" containerID="27e567f8947af13e33825131fd9bc0fbd300ba7609775a78622bf80250518c7c"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.077938 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090004 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090631 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="prometheus"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090659 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="prometheus"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090674 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88cc811c-5779-4c87-8736-52b9635334ce" containerName="extract-utilities"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090684 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="88cc811c-5779-4c87-8736-52b9635334ce" containerName="extract-utilities"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090717 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="config-reloader"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090726 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="config-reloader"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090746 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" containerName="extract-content"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090754 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" containerName="extract-content"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090774 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerName="extract-utilities"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090782 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerName="extract-utilities"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090801 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerName="extract-content"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090810 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerName="extract-content"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090829 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" containerName="registry-server"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090837 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" containerName="registry-server"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090862 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="thanos-sidecar"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090870 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="thanos-sidecar"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090884 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="init-config-reloader"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090893 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="init-config-reloader"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090907 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" containerName="extract-utilities"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090916 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" containerName="extract-utilities"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090932 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88cc811c-5779-4c87-8736-52b9635334ce" containerName="registry-server"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.090940 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="88cc811c-5779-4c87-8736-52b9635334ce" containerName="registry-server"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.090957 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfc1b363-fb5b-4872-bf7f-215dc9c617b5" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.093784 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfc1b363-fb5b-4872-bf7f-215dc9c617b5" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.093842 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88cc811c-5779-4c87-8736-52b9635334ce" containerName="extract-content"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.093853 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="88cc811c-5779-4c87-8736-52b9635334ce" containerName="extract-content"
Nov 26 06:13:34 crc kubenswrapper[4871]: E1126 06:13:34.093867 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerName="registry-server"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.093878 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerName="registry-server"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.094240 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="config-reloader"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.094276 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfc1b363-fb5b-4872-bf7f-215dc9c617b5" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.094302 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="77cf21d1-ae5f-48c3-ad54-428adeb822a9" containerName="registry-server"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.094318 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="88cc811c-5779-4c87-8736-52b9635334ce" containerName="registry-server"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.094341 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="thanos-sidecar"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.094354 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" containerName="prometheus"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.094368 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="80ef13e3-9fdd-465f-bca8-8d2a14e7ef42" containerName="registry-server"
Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.097903 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.098359 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.102468 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.102723 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.102903 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.103594 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.103879 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-qb6hl" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.116666 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.181682 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.181797 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.181845 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.181911 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.181967 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vg79\" (UniqueName: \"kubernetes.io/projected/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-kube-api-access-8vg79\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.182003 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.182046 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.182073 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.182130 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.182162 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.182192 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.283438 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vg79\" (UniqueName: \"kubernetes.io/projected/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-kube-api-access-8vg79\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.283502 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.283567 4871 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.283593 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.283659 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.283690 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.283715 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.283776 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.283896 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.283947 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.284026 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " 
pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.286994 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.289116 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.290127 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.294728 4871 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.294772 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/411f5d2e0132cbadcdbc80898abccb6eaaa272fad7576dd15cdb4f42514f558a/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.302384 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.303366 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.304545 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vg79\" (UniqueName: \"kubernetes.io/projected/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-kube-api-access-8vg79\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.304730 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" 
(UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.306517 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.306793 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.309337 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ea6e2b4-f88f-48c1-9044-5697a38a7abb-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.337130 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c1358cf1-f0a8-411e-82e1-a150ba384dd9\") pod \"prometheus-metric-storage-0\" (UID: \"8ea6e2b4-f88f-48c1-9044-5697a38a7abb\") " pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.467628 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.519983 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a58d8ebe-c4cb-45c2-8529-d1094ee56518" path="/var/lib/kubelet/pods/a58d8ebe-c4cb-45c2-8529-d1094ee56518/volumes" Nov 26 06:13:34 crc kubenswrapper[4871]: I1126 06:13:34.977699 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 06:13:35 crc kubenswrapper[4871]: I1126 06:13:35.979061 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ea6e2b4-f88f-48c1-9044-5697a38a7abb","Type":"ContainerStarted","Data":"e3539dee8e229f19fb58892333296fa990b6d5e8490657a9f1def299fac73e2b"} Nov 26 06:13:40 crc kubenswrapper[4871]: I1126 06:13:40.033810 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ea6e2b4-f88f-48c1-9044-5697a38a7abb","Type":"ContainerStarted","Data":"67fb96d07de130cb24eedae0484affbe29ca584a4a12ca8cceddff0a813d18dd"} Nov 26 06:13:49 crc kubenswrapper[4871]: I1126 06:13:49.175161 4871 generic.go:334] "Generic (PLEG): container finished" podID="8ea6e2b4-f88f-48c1-9044-5697a38a7abb" containerID="67fb96d07de130cb24eedae0484affbe29ca584a4a12ca8cceddff0a813d18dd" exitCode=0 Nov 26 06:13:49 crc kubenswrapper[4871]: I1126 06:13:49.175500 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ea6e2b4-f88f-48c1-9044-5697a38a7abb","Type":"ContainerDied","Data":"67fb96d07de130cb24eedae0484affbe29ca584a4a12ca8cceddff0a813d18dd"} Nov 26 06:13:50 crc kubenswrapper[4871]: I1126 06:13:50.194510 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ea6e2b4-f88f-48c1-9044-5697a38a7abb","Type":"ContainerStarted","Data":"97c4e29d40cca345e15c925b51cda3b9e1255a8372058b6bbb727dcc0f65e603"} Nov 26 06:13:53 crc kubenswrapper[4871]: I1126 06:13:53.225582 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ea6e2b4-f88f-48c1-9044-5697a38a7abb","Type":"ContainerStarted","Data":"1dfae725f0bae25c2eeed013f5cd641bd8b56e0d3cc446e8bfec3406ef1e0ac5"} Nov 26 06:13:53 crc kubenswrapper[4871]: I1126 06:13:53.614634 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:13:53 crc kubenswrapper[4871]: I1126 06:13:53.614974 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:13:54 crc kubenswrapper[4871]: I1126 06:13:54.249007 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ea6e2b4-f88f-48c1-9044-5697a38a7abb","Type":"ContainerStarted","Data":"98b127d4eceb2646ea23e7270f8270a0ec4d7e440dce1ec0718972d7f3c01a18"} Nov 26 06:13:54 crc kubenswrapper[4871]: I1126 06:13:54.284092 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" 
podStartSLOduration=20.284073012 podStartE2EDuration="20.284073012s" podCreationTimestamp="2025-11-26 06:13:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:13:54.281369445 +0000 UTC m=+2892.464421101" watchObservedRunningTime="2025-11-26 06:13:54.284073012 +0000 UTC m=+2892.467124618" Nov 26 06:13:54 crc kubenswrapper[4871]: I1126 06:13:54.468084 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 26 06:14:04 crc kubenswrapper[4871]: I1126 06:14:04.467924 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 26 06:14:04 crc kubenswrapper[4871]: I1126 06:14:04.474059 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 26 06:14:05 crc kubenswrapper[4871]: I1126 06:14:05.458388 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 26 06:14:23 crc kubenswrapper[4871]: I1126 06:14:23.614748 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:14:23 crc kubenswrapper[4871]: I1126 06:14:23.615428 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.183219 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.186426 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.191771 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.192202 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-lgbwn" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.192221 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.192294 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.198459 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.290130 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.290501 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-config-data\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.290554 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.290588 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.290649 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.290696 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h2ql\" (UniqueName: \"kubernetes.io/projected/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-kube-api-access-8h2ql\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.290745 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: 
\"kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.290792 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.290880 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.393005 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.393086 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.393164 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.393193 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-config-data\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.393212 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.393233 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.393274 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ca-certs\") pod \"tempest-tests-tempest\" (UID: 
\"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.393308 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h2ql\" (UniqueName: \"kubernetes.io/projected/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-kube-api-access-8h2ql\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.393341 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.393849 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.394503 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.395607 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.396854 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.397587 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-config-data\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.401297 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.401506 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 
06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.403406 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.414694 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h2ql\" (UniqueName: \"kubernetes.io/projected/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-kube-api-access-8h2ql\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.430911 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") " pod="openstack/tempest-tests-tempest" Nov 26 06:14:25 crc kubenswrapper[4871]: I1126 06:14:25.524433 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 26 06:14:26 crc kubenswrapper[4871]: I1126 06:14:26.024708 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 26 06:14:26 crc kubenswrapper[4871]: I1126 06:14:26.696696 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc","Type":"ContainerStarted","Data":"4d4cd0821e4fa8f6cf7636bbd9216f34ca4af89d31d511cca4163a4ca558336d"} Nov 26 06:14:42 crc kubenswrapper[4871]: I1126 06:14:42.878788 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc","Type":"ContainerStarted","Data":"159b5a19cf451f34170e6c1567c3a6d1852b7e29a209a845a19d80e6e00262e6"} Nov 26 06:14:42 crc kubenswrapper[4871]: I1126 06:14:42.907360 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.004467257 podStartE2EDuration="18.907340874s" podCreationTimestamp="2025-11-26 06:14:24 +0000 UTC" firstStartedPulling="2025-11-26 06:14:26.029592214 +0000 UTC m=+2924.212643800" lastFinishedPulling="2025-11-26 06:14:41.932465831 +0000 UTC m=+2940.115517417" observedRunningTime="2025-11-26 06:14:42.894222839 +0000 UTC m=+2941.077274465" watchObservedRunningTime="2025-11-26 06:14:42.907340874 +0000 UTC m=+2941.090392460" Nov 26 06:14:53 crc kubenswrapper[4871]: I1126 06:14:53.614719 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:14:53 crc kubenswrapper[4871]: I1126 06:14:53.615385 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:14:53 crc kubenswrapper[4871]: I1126 06:14:53.615452 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 06:14:53 crc kubenswrapper[4871]: I1126 06:14:53.616297 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 06:14:53 crc kubenswrapper[4871]: I1126 06:14:53.616392 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" gracePeriod=600 Nov 26 06:14:53 crc kubenswrapper[4871]: E1126 06:14:53.747871 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:14:54 crc kubenswrapper[4871]: I1126 06:14:54.018107 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" exitCode=0 Nov 26 06:14:54 crc kubenswrapper[4871]: I1126 06:14:54.018232 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f"} Nov 26 06:14:54 crc kubenswrapper[4871]: I1126 06:14:54.019168 4871 scope.go:117] "RemoveContainer" containerID="f70aea01c68d7922032007da61abc4f689feb69d32698e40339cf47f34bc06bf" Nov 26 06:14:54 crc kubenswrapper[4871]: I1126 06:14:54.020289 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:14:54 crc kubenswrapper[4871]: E1126 06:14:54.020804 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.177167 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j"] Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.179587 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.182349 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.182624 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.191912 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9d9076c-3859-4018-87b0-0ca60e08219c-config-volume\") pod \"collect-profiles-29402295-k8j7j\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.192171 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2ltk\" (UniqueName: \"kubernetes.io/projected/c9d9076c-3859-4018-87b0-0ca60e08219c-kube-api-access-r2ltk\") pod \"collect-profiles-29402295-k8j7j\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.192214 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9d9076c-3859-4018-87b0-0ca60e08219c-secret-volume\") pod \"collect-profiles-29402295-k8j7j\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.196964 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j"] Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.294246 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2ltk\" (UniqueName: \"kubernetes.io/projected/c9d9076c-3859-4018-87b0-0ca60e08219c-kube-api-access-r2ltk\") pod \"collect-profiles-29402295-k8j7j\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.294319 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9d9076c-3859-4018-87b0-0ca60e08219c-secret-volume\") pod \"collect-profiles-29402295-k8j7j\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.294518 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9d9076c-3859-4018-87b0-0ca60e08219c-config-volume\") pod \"collect-profiles-29402295-k8j7j\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.295864 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9d9076c-3859-4018-87b0-0ca60e08219c-config-volume\") pod 
\"collect-profiles-29402295-k8j7j\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.308850 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9d9076c-3859-4018-87b0-0ca60e08219c-secret-volume\") pod \"collect-profiles-29402295-k8j7j\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.314077 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2ltk\" (UniqueName: \"kubernetes.io/projected/c9d9076c-3859-4018-87b0-0ca60e08219c-kube-api-access-r2ltk\") pod \"collect-profiles-29402295-k8j7j\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:00 crc kubenswrapper[4871]: I1126 06:15:00.509911 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:01 crc kubenswrapper[4871]: I1126 06:15:01.072486 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j"] Nov 26 06:15:01 crc kubenswrapper[4871]: W1126 06:15:01.080589 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9d9076c_3859_4018_87b0_0ca60e08219c.slice/crio-47347f506aca97b0ec64edad934bf97b08ae61f9db5fa2b84c7aece510dbfa63 WatchSource:0}: Error finding container 47347f506aca97b0ec64edad934bf97b08ae61f9db5fa2b84c7aece510dbfa63: Status 404 returned error can't find the container with id 47347f506aca97b0ec64edad934bf97b08ae61f9db5fa2b84c7aece510dbfa63 Nov 26 06:15:01 crc kubenswrapper[4871]: I1126 06:15:01.106541 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" event={"ID":"c9d9076c-3859-4018-87b0-0ca60e08219c","Type":"ContainerStarted","Data":"47347f506aca97b0ec64edad934bf97b08ae61f9db5fa2b84c7aece510dbfa63"} Nov 26 06:15:02 crc kubenswrapper[4871]: I1126 06:15:02.121412 4871 generic.go:334] "Generic (PLEG): container finished" podID="c9d9076c-3859-4018-87b0-0ca60e08219c" containerID="5a7b761141e485c178ee52de85870225a416d99b3b6a361fa0d7e05ab10c8334" exitCode=0 Nov 26 06:15:02 crc kubenswrapper[4871]: I1126 06:15:02.121481 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" event={"ID":"c9d9076c-3859-4018-87b0-0ca60e08219c","Type":"ContainerDied","Data":"5a7b761141e485c178ee52de85870225a416d99b3b6a361fa0d7e05ab10c8334"} Nov 26 06:15:03 crc kubenswrapper[4871]: I1126 06:15:03.508790 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:03 crc kubenswrapper[4871]: I1126 06:15:03.668204 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2ltk\" (UniqueName: \"kubernetes.io/projected/c9d9076c-3859-4018-87b0-0ca60e08219c-kube-api-access-r2ltk\") pod \"c9d9076c-3859-4018-87b0-0ca60e08219c\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " Nov 26 06:15:03 crc kubenswrapper[4871]: I1126 06:15:03.668266 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9d9076c-3859-4018-87b0-0ca60e08219c-config-volume\") pod \"c9d9076c-3859-4018-87b0-0ca60e08219c\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " Nov 26 06:15:03 crc kubenswrapper[4871]: I1126 06:15:03.668472 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9d9076c-3859-4018-87b0-0ca60e08219c-secret-volume\") pod \"c9d9076c-3859-4018-87b0-0ca60e08219c\" (UID: \"c9d9076c-3859-4018-87b0-0ca60e08219c\") " Nov 26 06:15:03 crc kubenswrapper[4871]: I1126 06:15:03.670695 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9d9076c-3859-4018-87b0-0ca60e08219c-config-volume" (OuterVolumeSpecName: "config-volume") pod "c9d9076c-3859-4018-87b0-0ca60e08219c" (UID: "c9d9076c-3859-4018-87b0-0ca60e08219c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:15:03 crc kubenswrapper[4871]: I1126 06:15:03.677171 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9d9076c-3859-4018-87b0-0ca60e08219c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "c9d9076c-3859-4018-87b0-0ca60e08219c" (UID: "c9d9076c-3859-4018-87b0-0ca60e08219c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:15:03 crc kubenswrapper[4871]: I1126 06:15:03.691252 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9d9076c-3859-4018-87b0-0ca60e08219c-kube-api-access-r2ltk" (OuterVolumeSpecName: "kube-api-access-r2ltk") pod "c9d9076c-3859-4018-87b0-0ca60e08219c" (UID: "c9d9076c-3859-4018-87b0-0ca60e08219c"). InnerVolumeSpecName "kube-api-access-r2ltk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:15:03 crc kubenswrapper[4871]: I1126 06:15:03.770715 4871 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c9d9076c-3859-4018-87b0-0ca60e08219c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:15:03 crc kubenswrapper[4871]: I1126 06:15:03.770917 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2ltk\" (UniqueName: \"kubernetes.io/projected/c9d9076c-3859-4018-87b0-0ca60e08219c-kube-api-access-r2ltk\") on node \"crc\" DevicePath \"\"" Nov 26 06:15:03 crc kubenswrapper[4871]: I1126 06:15:03.770929 4871 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c9d9076c-3859-4018-87b0-0ca60e08219c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:15:04 crc kubenswrapper[4871]: I1126 06:15:04.144513 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" event={"ID":"c9d9076c-3859-4018-87b0-0ca60e08219c","Type":"ContainerDied","Data":"47347f506aca97b0ec64edad934bf97b08ae61f9db5fa2b84c7aece510dbfa63"} Nov 26 06:15:04 crc kubenswrapper[4871]: I1126 06:15:04.144662 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="47347f506aca97b0ec64edad934bf97b08ae61f9db5fa2b84c7aece510dbfa63" Nov 26 06:15:04 crc kubenswrapper[4871]: I1126 06:15:04.144692 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j" Nov 26 06:15:04 crc kubenswrapper[4871]: I1126 06:15:04.597580 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g"] Nov 26 06:15:04 crc kubenswrapper[4871]: I1126 06:15:04.605484 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402250-l2k2g"] Nov 26 06:15:06 crc kubenswrapper[4871]: I1126 06:15:06.559033 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6" path="/var/lib/kubelet/pods/ddeb3e21-9b1a-4ebd-8130-9b7ba68b85d6/volumes" Nov 26 06:15:09 crc kubenswrapper[4871]: I1126 06:15:09.507990 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:15:09 crc kubenswrapper[4871]: E1126 06:15:09.508519 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:15:20 crc kubenswrapper[4871]: I1126 06:15:20.507374 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:15:20 crc kubenswrapper[4871]: E1126 06:15:20.508330 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:15:23 crc kubenswrapper[4871]: I1126 06:15:23.368307 4871 scope.go:117] "RemoveContainer" containerID="92c517dee00d279c24d34ac425449d70bcbaf0cab40f8e1af01c92f60fe8525c" Nov 26 06:15:32 crc kubenswrapper[4871]: I1126 06:15:32.516200 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:15:32 crc kubenswrapper[4871]: E1126 06:15:32.517099 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:15:45 crc kubenswrapper[4871]: I1126 06:15:45.507313 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:15:45 crc kubenswrapper[4871]: E1126 06:15:45.508288 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:16:00 crc kubenswrapper[4871]: I1126 06:16:00.509834 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:16:00 crc kubenswrapper[4871]: E1126 06:16:00.510601 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:16:12 crc kubenswrapper[4871]: I1126 06:16:12.518817 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:16:12 crc kubenswrapper[4871]: E1126 06:16:12.520934 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:16:25 crc kubenswrapper[4871]: I1126 06:16:25.508098 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:16:25 crc kubenswrapper[4871]: E1126 06:16:25.514002 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:16:38 crc kubenswrapper[4871]: I1126 06:16:38.508431 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:16:38 crc kubenswrapper[4871]: E1126 06:16:38.509773 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:16:53 crc kubenswrapper[4871]: I1126 06:16:53.507396 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:16:53 crc kubenswrapper[4871]: E1126 06:16:53.508613 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:17:06 crc kubenswrapper[4871]: I1126 06:17:06.507585 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:17:06 crc kubenswrapper[4871]: E1126 06:17:06.510804 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:17:21 crc kubenswrapper[4871]: I1126 06:17:21.507932 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:17:21 crc kubenswrapper[4871]: E1126 06:17:21.509919 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:17:33 crc kubenswrapper[4871]: I1126 06:17:33.507414 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:17:33 crc kubenswrapper[4871]: E1126 06:17:33.508247 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:17:48 crc kubenswrapper[4871]: I1126 06:17:48.510183 4871 
scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:17:48 crc kubenswrapper[4871]: E1126 06:17:48.511224 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:18:02 crc kubenswrapper[4871]: I1126 06:18:02.518659 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:18:02 crc kubenswrapper[4871]: E1126 06:18:02.519638 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:18:16 crc kubenswrapper[4871]: I1126 06:18:16.507922 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:18:16 crc kubenswrapper[4871]: E1126 06:18:16.509343 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:18:28 crc kubenswrapper[4871]: I1126 06:18:28.507271 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:18:28 crc kubenswrapper[4871]: E1126 06:18:28.508104 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:18:41 crc kubenswrapper[4871]: I1126 06:18:41.506904 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:18:41 crc kubenswrapper[4871]: E1126 06:18:41.507711 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:18:55 crc kubenswrapper[4871]: I1126 06:18:55.507729 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:18:55 crc kubenswrapper[4871]: E1126 06:18:55.508619 4871 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:19:07 crc kubenswrapper[4871]: I1126 06:19:07.507739 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:19:07 crc kubenswrapper[4871]: E1126 06:19:07.511001 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:19:18 crc kubenswrapper[4871]: I1126 06:19:18.507486 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:19:18 crc kubenswrapper[4871]: E1126 06:19:18.508410 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:19:32 crc kubenswrapper[4871]: I1126 06:19:32.517556 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:19:32 crc kubenswrapper[4871]: E1126 06:19:32.518787 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:19:45 crc kubenswrapper[4871]: I1126 06:19:45.507115 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:19:45 crc kubenswrapper[4871]: E1126 06:19:45.507896 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:19:57 crc kubenswrapper[4871]: I1126 06:19:57.507862 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:19:58 crc kubenswrapper[4871]: I1126 06:19:58.768637 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" 
event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"2b7f2e346520824b3867687e2ae756bb9159d439cbe88ac2c9c598abefcaaaba"} Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.449653 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p57sb"] Nov 26 06:21:44 crc kubenswrapper[4871]: E1126 06:21:44.450836 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9d9076c-3859-4018-87b0-0ca60e08219c" containerName="collect-profiles" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.450853 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9d9076c-3859-4018-87b0-0ca60e08219c" containerName="collect-profiles" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.451101 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9d9076c-3859-4018-87b0-0ca60e08219c" containerName="collect-profiles" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.452896 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.464030 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p57sb"] Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.495211 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-utilities\") pod \"redhat-operators-p57sb\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.495271 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-catalog-content\") pod \"redhat-operators-p57sb\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.495742 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvntd\" (UniqueName: \"kubernetes.io/projected/62e90694-5b6e-467a-908a-35ba613a1f08-kube-api-access-tvntd\") pod \"redhat-operators-p57sb\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.597379 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvntd\" (UniqueName: \"kubernetes.io/projected/62e90694-5b6e-467a-908a-35ba613a1f08-kube-api-access-tvntd\") pod \"redhat-operators-p57sb\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.597480 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-utilities\") pod \"redhat-operators-p57sb\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.597556 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-catalog-content\") 
pod \"redhat-operators-p57sb\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.598078 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-utilities\") pod \"redhat-operators-p57sb\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.598081 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-catalog-content\") pod \"redhat-operators-p57sb\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.627916 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvntd\" (UniqueName: \"kubernetes.io/projected/62e90694-5b6e-467a-908a-35ba613a1f08-kube-api-access-tvntd\") pod \"redhat-operators-p57sb\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:44 crc kubenswrapper[4871]: I1126 06:21:44.790982 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:45 crc kubenswrapper[4871]: I1126 06:21:45.282324 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p57sb"] Nov 26 06:21:46 crc kubenswrapper[4871]: I1126 06:21:46.094449 4871 generic.go:334] "Generic (PLEG): container finished" podID="62e90694-5b6e-467a-908a-35ba613a1f08" containerID="4692dd39d4639c9cd5fa1c9ae39864a38979a8ab36a956b94ba5ceb069be9e39" exitCode=0 Nov 26 06:21:46 crc kubenswrapper[4871]: I1126 06:21:46.094499 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p57sb" event={"ID":"62e90694-5b6e-467a-908a-35ba613a1f08","Type":"ContainerDied","Data":"4692dd39d4639c9cd5fa1c9ae39864a38979a8ab36a956b94ba5ceb069be9e39"} Nov 26 06:21:46 crc kubenswrapper[4871]: I1126 06:21:46.094758 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p57sb" event={"ID":"62e90694-5b6e-467a-908a-35ba613a1f08","Type":"ContainerStarted","Data":"a01638595211f4f22f88326551b27bd4c695468a9880ef9fd036c2250b5181d7"} Nov 26 06:21:46 crc kubenswrapper[4871]: I1126 06:21:46.099209 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 06:21:47 crc kubenswrapper[4871]: I1126 06:21:47.114817 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p57sb" event={"ID":"62e90694-5b6e-467a-908a-35ba613a1f08","Type":"ContainerStarted","Data":"882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066"} Nov 26 06:21:51 crc kubenswrapper[4871]: I1126 06:21:51.168651 4871 generic.go:334] "Generic (PLEG): container finished" podID="62e90694-5b6e-467a-908a-35ba613a1f08" containerID="882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066" exitCode=0 Nov 26 06:21:51 crc kubenswrapper[4871]: I1126 06:21:51.168816 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p57sb" 
event={"ID":"62e90694-5b6e-467a-908a-35ba613a1f08","Type":"ContainerDied","Data":"882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066"} Nov 26 06:21:52 crc kubenswrapper[4871]: I1126 06:21:52.184685 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p57sb" event={"ID":"62e90694-5b6e-467a-908a-35ba613a1f08","Type":"ContainerStarted","Data":"bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7"} Nov 26 06:21:52 crc kubenswrapper[4871]: I1126 06:21:52.210846 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p57sb" podStartSLOduration=2.725154465 podStartE2EDuration="8.210823204s" podCreationTimestamp="2025-11-26 06:21:44 +0000 UTC" firstStartedPulling="2025-11-26 06:21:46.098893562 +0000 UTC m=+3364.281945148" lastFinishedPulling="2025-11-26 06:21:51.584562281 +0000 UTC m=+3369.767613887" observedRunningTime="2025-11-26 06:21:52.206041947 +0000 UTC m=+3370.389093573" watchObservedRunningTime="2025-11-26 06:21:52.210823204 +0000 UTC m=+3370.393874830" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.385691 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pglwr"] Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.388590 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.405463 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pglwr"] Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.519378 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-utilities\") pod \"redhat-marketplace-pglwr\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.519414 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blch6\" (UniqueName: \"kubernetes.io/projected/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-kube-api-access-blch6\") pod \"redhat-marketplace-pglwr\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.519469 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-catalog-content\") pod \"redhat-marketplace-pglwr\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.586468 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2hgth"] Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.588424 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.604915 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2hgth"] Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.620689 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-utilities\") pod \"redhat-marketplace-pglwr\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.620726 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blch6\" (UniqueName: \"kubernetes.io/projected/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-kube-api-access-blch6\") pod \"redhat-marketplace-pglwr\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.620789 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-catalog-content\") pod \"redhat-marketplace-pglwr\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.621610 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-utilities\") pod \"redhat-marketplace-pglwr\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.622583 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-catalog-content\") pod \"redhat-marketplace-pglwr\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.657787 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blch6\" (UniqueName: \"kubernetes.io/projected/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-kube-api-access-blch6\") pod \"redhat-marketplace-pglwr\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.722687 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-catalog-content\") pod \"certified-operators-2hgth\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.722933 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g875t\" (UniqueName: \"kubernetes.io/projected/88176307-a2eb-4a84-9abb-f88ee3635a6b-kube-api-access-g875t\") pod \"certified-operators-2hgth\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.722957 4871 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-utilities\") pod \"certified-operators-2hgth\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.733920 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.791720 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.792832 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.825238 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g875t\" (UniqueName: \"kubernetes.io/projected/88176307-a2eb-4a84-9abb-f88ee3635a6b-kube-api-access-g875t\") pod \"certified-operators-2hgth\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.825290 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-utilities\") pod \"certified-operators-2hgth\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.825339 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-catalog-content\") pod \"certified-operators-2hgth\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.825825 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-catalog-content\") pod \"certified-operators-2hgth\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.826344 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-utilities\") pod \"certified-operators-2hgth\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.842719 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g875t\" (UniqueName: \"kubernetes.io/projected/88176307-a2eb-4a84-9abb-f88ee3635a6b-kube-api-access-g875t\") pod \"certified-operators-2hgth\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:21:54 crc kubenswrapper[4871]: I1126 06:21:54.919470 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:21:55 crc kubenswrapper[4871]: I1126 06:21:55.356313 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pglwr"] Nov 26 06:21:55 crc kubenswrapper[4871]: W1126 06:21:55.372432 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae7307ba_fd30_45fd_9d67_b7a3688dcdb2.slice/crio-7715d5849ab0ae54cb5632cb0dd77248c4e76e1674b956c7603a8bec87c725d1 WatchSource:0}: Error finding container 7715d5849ab0ae54cb5632cb0dd77248c4e76e1674b956c7603a8bec87c725d1: Status 404 returned error can't find the container with id 7715d5849ab0ae54cb5632cb0dd77248c4e76e1674b956c7603a8bec87c725d1 Nov 26 06:21:55 crc kubenswrapper[4871]: I1126 06:21:55.546296 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2hgth"] Nov 26 06:21:55 crc kubenswrapper[4871]: W1126 06:21:55.549412 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88176307_a2eb_4a84_9abb_f88ee3635a6b.slice/crio-9509c5fbf057c6e6191182366f4e941cd69bf895c56664c5cbf145ba9ab67eba WatchSource:0}: Error finding container 9509c5fbf057c6e6191182366f4e941cd69bf895c56664c5cbf145ba9ab67eba: Status 404 returned error can't find the container with id 9509c5fbf057c6e6191182366f4e941cd69bf895c56664c5cbf145ba9ab67eba Nov 26 06:21:55 crc kubenswrapper[4871]: I1126 06:21:55.847080 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-p57sb" podUID="62e90694-5b6e-467a-908a-35ba613a1f08" containerName="registry-server" probeResult="failure" output=< Nov 26 06:21:55 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s Nov 26 06:21:55 crc kubenswrapper[4871]: > Nov 26 06:21:56 crc kubenswrapper[4871]: I1126 06:21:56.242329 4871 generic.go:334] "Generic (PLEG): container finished" podID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" containerID="9973055d330c1a9be0e6bdbc241d97058a9ac200bb99c0f430d88fee2dc44843" exitCode=0 Nov 26 06:21:56 crc kubenswrapper[4871]: I1126 06:21:56.242385 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pglwr" event={"ID":"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2","Type":"ContainerDied","Data":"9973055d330c1a9be0e6bdbc241d97058a9ac200bb99c0f430d88fee2dc44843"} Nov 26 06:21:56 crc kubenswrapper[4871]: I1126 06:21:56.242429 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pglwr" event={"ID":"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2","Type":"ContainerStarted","Data":"7715d5849ab0ae54cb5632cb0dd77248c4e76e1674b956c7603a8bec87c725d1"} Nov 26 06:21:56 crc kubenswrapper[4871]: I1126 06:21:56.244300 4871 generic.go:334] "Generic (PLEG): container finished" podID="88176307-a2eb-4a84-9abb-f88ee3635a6b" containerID="0f14fe1839b8b92dbe85ddbc589ba679b24fff4b6eb14fed86b937eae709352e" exitCode=0 Nov 26 06:21:56 crc kubenswrapper[4871]: I1126 06:21:56.244330 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hgth" event={"ID":"88176307-a2eb-4a84-9abb-f88ee3635a6b","Type":"ContainerDied","Data":"0f14fe1839b8b92dbe85ddbc589ba679b24fff4b6eb14fed86b937eae709352e"} Nov 26 06:21:56 crc kubenswrapper[4871]: I1126 06:21:56.244348 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-2hgth" event={"ID":"88176307-a2eb-4a84-9abb-f88ee3635a6b","Type":"ContainerStarted","Data":"9509c5fbf057c6e6191182366f4e941cd69bf895c56664c5cbf145ba9ab67eba"} Nov 26 06:21:56 crc kubenswrapper[4871]: I1126 06:21:56.985415 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z9x8c"] Nov 26 06:21:56 crc kubenswrapper[4871]: I1126 06:21:56.988615 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:21:56 crc kubenswrapper[4871]: I1126 06:21:56.997518 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z9x8c"] Nov 26 06:21:57 crc kubenswrapper[4871]: I1126 06:21:57.077627 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55qh6\" (UniqueName: \"kubernetes.io/projected/5dca197c-9641-4f53-8249-38842a736ac0-kube-api-access-55qh6\") pod \"community-operators-z9x8c\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:21:57 crc kubenswrapper[4871]: I1126 06:21:57.077926 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-catalog-content\") pod \"community-operators-z9x8c\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:21:57 crc kubenswrapper[4871]: I1126 06:21:57.078041 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-utilities\") pod \"community-operators-z9x8c\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:21:57 crc kubenswrapper[4871]: I1126 06:21:57.180759 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55qh6\" (UniqueName: \"kubernetes.io/projected/5dca197c-9641-4f53-8249-38842a736ac0-kube-api-access-55qh6\") pod \"community-operators-z9x8c\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:21:57 crc kubenswrapper[4871]: I1126 06:21:57.181298 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-catalog-content\") pod \"community-operators-z9x8c\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:21:57 crc kubenswrapper[4871]: I1126 06:21:57.181445 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-utilities\") pod \"community-operators-z9x8c\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:21:57 crc kubenswrapper[4871]: I1126 06:21:57.181776 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-catalog-content\") pod \"community-operators-z9x8c\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " 
pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:21:57 crc kubenswrapper[4871]: I1126 06:21:57.181961 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-utilities\") pod \"community-operators-z9x8c\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:21:57 crc kubenswrapper[4871]: I1126 06:21:57.199787 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55qh6\" (UniqueName: \"kubernetes.io/projected/5dca197c-9641-4f53-8249-38842a736ac0-kube-api-access-55qh6\") pod \"community-operators-z9x8c\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:21:57 crc kubenswrapper[4871]: I1126 06:21:57.254630 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pglwr" event={"ID":"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2","Type":"ContainerStarted","Data":"22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2"} Nov 26 06:21:57 crc kubenswrapper[4871]: I1126 06:21:57.311070 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:21:58 crc kubenswrapper[4871]: I1126 06:21:58.055055 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z9x8c"] Nov 26 06:21:58 crc kubenswrapper[4871]: I1126 06:21:58.264254 4871 generic.go:334] "Generic (PLEG): container finished" podID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" containerID="22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2" exitCode=0 Nov 26 06:21:58 crc kubenswrapper[4871]: I1126 06:21:58.264322 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pglwr" event={"ID":"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2","Type":"ContainerDied","Data":"22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2"} Nov 26 06:21:58 crc kubenswrapper[4871]: I1126 06:21:58.269126 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hgth" event={"ID":"88176307-a2eb-4a84-9abb-f88ee3635a6b","Type":"ContainerStarted","Data":"57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59"} Nov 26 06:21:58 crc kubenswrapper[4871]: I1126 06:21:58.272826 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z9x8c" event={"ID":"5dca197c-9641-4f53-8249-38842a736ac0","Type":"ContainerStarted","Data":"458c4b2dee3a32f6b7f06b26748ffc4a59707b32dfa5b25d4a20930eac22195a"} Nov 26 06:21:59 crc kubenswrapper[4871]: I1126 06:21:59.289497 4871 generic.go:334] "Generic (PLEG): container finished" podID="5dca197c-9641-4f53-8249-38842a736ac0" containerID="b4420c1359669380853fa675ac53c9580b17064e897bbc38e746b77c5e79733a" exitCode=0 Nov 26 06:21:59 crc kubenswrapper[4871]: I1126 06:21:59.289562 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z9x8c" event={"ID":"5dca197c-9641-4f53-8249-38842a736ac0","Type":"ContainerDied","Data":"b4420c1359669380853fa675ac53c9580b17064e897bbc38e746b77c5e79733a"} Nov 26 06:21:59 crc kubenswrapper[4871]: I1126 06:21:59.294628 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pglwr" 
event={"ID":"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2","Type":"ContainerStarted","Data":"9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5"} Nov 26 06:21:59 crc kubenswrapper[4871]: I1126 06:21:59.345597 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pglwr" podStartSLOduration=2.902797847 podStartE2EDuration="5.3455417s" podCreationTimestamp="2025-11-26 06:21:54 +0000 UTC" firstStartedPulling="2025-11-26 06:21:56.243834347 +0000 UTC m=+3374.426885933" lastFinishedPulling="2025-11-26 06:21:58.68657816 +0000 UTC m=+3376.869629786" observedRunningTime="2025-11-26 06:21:59.329979207 +0000 UTC m=+3377.513030793" watchObservedRunningTime="2025-11-26 06:21:59.3455417 +0000 UTC m=+3377.528593306" Nov 26 06:22:00 crc kubenswrapper[4871]: I1126 06:22:00.315367 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z9x8c" event={"ID":"5dca197c-9641-4f53-8249-38842a736ac0","Type":"ContainerStarted","Data":"50bcc68677eb912418fa3c49d33cceac99f75002fe7fd69869137d5396ffe688"} Nov 26 06:22:00 crc kubenswrapper[4871]: I1126 06:22:00.319621 4871 generic.go:334] "Generic (PLEG): container finished" podID="88176307-a2eb-4a84-9abb-f88ee3635a6b" containerID="57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59" exitCode=0 Nov 26 06:22:00 crc kubenswrapper[4871]: I1126 06:22:00.319808 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hgth" event={"ID":"88176307-a2eb-4a84-9abb-f88ee3635a6b","Type":"ContainerDied","Data":"57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59"} Nov 26 06:22:01 crc kubenswrapper[4871]: I1126 06:22:01.334920 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hgth" event={"ID":"88176307-a2eb-4a84-9abb-f88ee3635a6b","Type":"ContainerStarted","Data":"78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c"} Nov 26 06:22:01 crc kubenswrapper[4871]: I1126 06:22:01.360853 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2hgth" podStartSLOduration=2.8335621 podStartE2EDuration="7.360833902s" podCreationTimestamp="2025-11-26 06:21:54 +0000 UTC" firstStartedPulling="2025-11-26 06:21:56.247061246 +0000 UTC m=+3374.430112832" lastFinishedPulling="2025-11-26 06:22:00.774333038 +0000 UTC m=+3378.957384634" observedRunningTime="2025-11-26 06:22:01.35425748 +0000 UTC m=+3379.537309096" watchObservedRunningTime="2025-11-26 06:22:01.360833902 +0000 UTC m=+3379.543885488" Nov 26 06:22:02 crc kubenswrapper[4871]: I1126 06:22:02.350171 4871 generic.go:334] "Generic (PLEG): container finished" podID="5dca197c-9641-4f53-8249-38842a736ac0" containerID="50bcc68677eb912418fa3c49d33cceac99f75002fe7fd69869137d5396ffe688" exitCode=0 Nov 26 06:22:02 crc kubenswrapper[4871]: I1126 06:22:02.350262 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z9x8c" event={"ID":"5dca197c-9641-4f53-8249-38842a736ac0","Type":"ContainerDied","Data":"50bcc68677eb912418fa3c49d33cceac99f75002fe7fd69869137d5396ffe688"} Nov 26 06:22:03 crc kubenswrapper[4871]: I1126 06:22:03.368232 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z9x8c" event={"ID":"5dca197c-9641-4f53-8249-38842a736ac0","Type":"ContainerStarted","Data":"7758762dd38df70bfc7bae82a0faa8bbae003c5c80e2b48cd0612bae278e0598"} Nov 26 06:22:03 
crc kubenswrapper[4871]: I1126 06:22:03.398480 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z9x8c" podStartSLOduration=3.874785083 podStartE2EDuration="7.398463004s" podCreationTimestamp="2025-11-26 06:21:56 +0000 UTC" firstStartedPulling="2025-11-26 06:21:59.291294812 +0000 UTC m=+3377.474346419" lastFinishedPulling="2025-11-26 06:22:02.814972744 +0000 UTC m=+3380.998024340" observedRunningTime="2025-11-26 06:22:03.389116723 +0000 UTC m=+3381.572168309" watchObservedRunningTime="2025-11-26 06:22:03.398463004 +0000 UTC m=+3381.581514590" Nov 26 06:22:04 crc kubenswrapper[4871]: I1126 06:22:04.734434 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:22:04 crc kubenswrapper[4871]: I1126 06:22:04.734748 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:22:04 crc kubenswrapper[4871]: I1126 06:22:04.789014 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:22:04 crc kubenswrapper[4871]: I1126 06:22:04.836688 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:22:04 crc kubenswrapper[4871]: I1126 06:22:04.907365 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:22:04 crc kubenswrapper[4871]: I1126 06:22:04.919908 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:22:04 crc kubenswrapper[4871]: I1126 06:22:04.919970 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:22:04 crc kubenswrapper[4871]: I1126 06:22:04.987430 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:22:05 crc kubenswrapper[4871]: I1126 06:22:05.450600 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:22:07 crc kubenswrapper[4871]: I1126 06:22:07.311848 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:22:07 crc kubenswrapper[4871]: I1126 06:22:07.312330 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:22:07 crc kubenswrapper[4871]: I1126 06:22:07.571848 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p57sb"] Nov 26 06:22:07 crc kubenswrapper[4871]: I1126 06:22:07.572496 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p57sb" podUID="62e90694-5b6e-467a-908a-35ba613a1f08" containerName="registry-server" containerID="cri-o://bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7" gracePeriod=2 Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.063194 4871 util.go:48] "No ready sandbox for pod can be found. 
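
The teardown above follows the normal path: SyncLoop DELETE from the API, then "Killing container with a grace period" with gracePeriod=2 — SIGTERM to registry-server, SIGKILL after at most two seconds — then volume teardown and the final REMOVE. Whether the two seconds comes from the pod spec's terminationGracePeriodSeconds or from the deleting client's options is not visible in these records; a client-go sketch of the override form, with the clientset assumed:

    package sketch

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // deleteWithGrace issues the kind of short-grace deletion these
    // SyncLoop DELETE records reflect: the kubelet sends SIGTERM, waits
    // up to the grace period, then kills the container.
    func deleteWithGrace(ctx context.Context, cs kubernetes.Interface, ns, pod string) error {
        grace := int64(2)
        return cs.CoreV1().Pods(ns).Delete(ctx, pod, metav1.DeleteOptions{
            GracePeriodSeconds: &grace,
        })
    }
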
Need to start a new one" pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.161376 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-utilities\") pod \"62e90694-5b6e-467a-908a-35ba613a1f08\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.161419 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-catalog-content\") pod \"62e90694-5b6e-467a-908a-35ba613a1f08\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.161542 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvntd\" (UniqueName: \"kubernetes.io/projected/62e90694-5b6e-467a-908a-35ba613a1f08-kube-api-access-tvntd\") pod \"62e90694-5b6e-467a-908a-35ba613a1f08\" (UID: \"62e90694-5b6e-467a-908a-35ba613a1f08\") " Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.162823 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-utilities" (OuterVolumeSpecName: "utilities") pod "62e90694-5b6e-467a-908a-35ba613a1f08" (UID: "62e90694-5b6e-467a-908a-35ba613a1f08"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.167576 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62e90694-5b6e-467a-908a-35ba613a1f08-kube-api-access-tvntd" (OuterVolumeSpecName: "kube-api-access-tvntd") pod "62e90694-5b6e-467a-908a-35ba613a1f08" (UID: "62e90694-5b6e-467a-908a-35ba613a1f08"). InnerVolumeSpecName "kube-api-access-tvntd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.255890 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "62e90694-5b6e-467a-908a-35ba613a1f08" (UID: "62e90694-5b6e-467a-908a-35ba613a1f08"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.263648 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvntd\" (UniqueName: \"kubernetes.io/projected/62e90694-5b6e-467a-908a-35ba613a1f08-kube-api-access-tvntd\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.263691 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.263703 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62e90694-5b6e-467a-908a-35ba613a1f08-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.385071 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-z9x8c" podUID="5dca197c-9641-4f53-8249-38842a736ac0" containerName="registry-server" probeResult="failure" output=< Nov 26 06:22:08 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s Nov 26 06:22:08 crc kubenswrapper[4871]: > Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.432920 4871 generic.go:334] "Generic (PLEG): container finished" podID="62e90694-5b6e-467a-908a-35ba613a1f08" containerID="bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7" exitCode=0 Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.432962 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p57sb" event={"ID":"62e90694-5b6e-467a-908a-35ba613a1f08","Type":"ContainerDied","Data":"bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7"} Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.432987 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p57sb" event={"ID":"62e90694-5b6e-467a-908a-35ba613a1f08","Type":"ContainerDied","Data":"a01638595211f4f22f88326551b27bd4c695468a9880ef9fd036c2250b5181d7"} Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.433005 4871 scope.go:117] "RemoveContainer" containerID="bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.433012 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p57sb" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.469963 4871 scope.go:117] "RemoveContainer" containerID="882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.493617 4871 scope.go:117] "RemoveContainer" containerID="4692dd39d4639c9cd5fa1c9ae39864a38979a8ab36a956b94ba5ceb069be9e39" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.495021 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p57sb"] Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.531502 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p57sb"] Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.558853 4871 scope.go:117] "RemoveContainer" containerID="bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7" Nov 26 06:22:08 crc kubenswrapper[4871]: E1126 06:22:08.559946 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7\": container with ID starting with bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7 not found: ID does not exist" containerID="bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.559999 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7"} err="failed to get container status \"bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7\": rpc error: code = NotFound desc = could not find container \"bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7\": container with ID starting with bfdd9ac24093feb71ed4c94ab02a853647c48f49376cb10e64ca0a2f6d993ca7 not found: ID does not exist" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.560035 4871 scope.go:117] "RemoveContainer" containerID="882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066" Nov 26 06:22:08 crc kubenswrapper[4871]: E1126 06:22:08.560557 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066\": container with ID starting with 882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066 not found: ID does not exist" containerID="882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.560606 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066"} err="failed to get container status \"882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066\": rpc error: code = NotFound desc = could not find container \"882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066\": container with ID starting with 882e159b60ca72ae958e324fb49f391394b32b6e892e76675765f18c5ef01066 not found: ID does not exist" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.560635 4871 scope.go:117] "RemoveContainer" containerID="4692dd39d4639c9cd5fa1c9ae39864a38979a8ab36a956b94ba5ceb069be9e39" Nov 26 06:22:08 crc kubenswrapper[4871]: E1126 06:22:08.560921 4871 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"4692dd39d4639c9cd5fa1c9ae39864a38979a8ab36a956b94ba5ceb069be9e39\": container with ID starting with 4692dd39d4639c9cd5fa1c9ae39864a38979a8ab36a956b94ba5ceb069be9e39 not found: ID does not exist" containerID="4692dd39d4639c9cd5fa1c9ae39864a38979a8ab36a956b94ba5ceb069be9e39" Nov 26 06:22:08 crc kubenswrapper[4871]: I1126 06:22:08.560943 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4692dd39d4639c9cd5fa1c9ae39864a38979a8ab36a956b94ba5ceb069be9e39"} err="failed to get container status \"4692dd39d4639c9cd5fa1c9ae39864a38979a8ab36a956b94ba5ceb069be9e39\": rpc error: code = NotFound desc = could not find container \"4692dd39d4639c9cd5fa1c9ae39864a38979a8ab36a956b94ba5ceb069be9e39\": container with ID starting with 4692dd39d4639c9cd5fa1c9ae39864a38979a8ab36a956b94ba5ceb069be9e39 not found: ID does not exist" Nov 26 06:22:09 crc kubenswrapper[4871]: I1126 06:22:09.370255 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pglwr"] Nov 26 06:22:09 crc kubenswrapper[4871]: I1126 06:22:09.370545 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pglwr" podUID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" containerName="registry-server" containerID="cri-o://9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5" gracePeriod=2 Nov 26 06:22:09 crc kubenswrapper[4871]: I1126 06:22:09.860805 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.000677 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blch6\" (UniqueName: \"kubernetes.io/projected/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-kube-api-access-blch6\") pod \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.000782 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-utilities\") pod \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.000836 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-catalog-content\") pod \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\" (UID: \"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2\") " Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.002078 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-utilities" (OuterVolumeSpecName: "utilities") pod "ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" (UID: "ae7307ba-fd30-45fd-9d67-b7a3688dcdb2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.006720 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-kube-api-access-blch6" (OuterVolumeSpecName: "kube-api-access-blch6") pod "ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" (UID: "ae7307ba-fd30-45fd-9d67-b7a3688dcdb2"). 
InnerVolumeSpecName "kube-api-access-blch6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.020163 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" (UID: "ae7307ba-fd30-45fd-9d67-b7a3688dcdb2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.103795 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blch6\" (UniqueName: \"kubernetes.io/projected/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-kube-api-access-blch6\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.104072 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.104191 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.457986 4871 generic.go:334] "Generic (PLEG): container finished" podID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" containerID="9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5" exitCode=0 Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.458057 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pglwr" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.458079 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pglwr" event={"ID":"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2","Type":"ContainerDied","Data":"9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5"} Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.458737 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pglwr" event={"ID":"ae7307ba-fd30-45fd-9d67-b7a3688dcdb2","Type":"ContainerDied","Data":"7715d5849ab0ae54cb5632cb0dd77248c4e76e1674b956c7603a8bec87c725d1"} Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.458804 4871 scope.go:117] "RemoveContainer" containerID="9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.491814 4871 scope.go:117] "RemoveContainer" containerID="22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.495306 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pglwr"] Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.506348 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pglwr"] Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.522291 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62e90694-5b6e-467a-908a-35ba613a1f08" path="/var/lib/kubelet/pods/62e90694-5b6e-467a-908a-35ba613a1f08/volumes" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.524462 4871 scope.go:117] "RemoveContainer" 
containerID="9973055d330c1a9be0e6bdbc241d97058a9ac200bb99c0f430d88fee2dc44843" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.525405 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" path="/var/lib/kubelet/pods/ae7307ba-fd30-45fd-9d67-b7a3688dcdb2/volumes" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.571936 4871 scope.go:117] "RemoveContainer" containerID="9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5" Nov 26 06:22:10 crc kubenswrapper[4871]: E1126 06:22:10.573888 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5\": container with ID starting with 9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5 not found: ID does not exist" containerID="9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.573931 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5"} err="failed to get container status \"9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5\": rpc error: code = NotFound desc = could not find container \"9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5\": container with ID starting with 9394641195bd5628ba339fe7def8a3084a3f67f5737b581b84938b33c6a080c5 not found: ID does not exist" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.573961 4871 scope.go:117] "RemoveContainer" containerID="22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2" Nov 26 06:22:10 crc kubenswrapper[4871]: E1126 06:22:10.574440 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2\": container with ID starting with 22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2 not found: ID does not exist" containerID="22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.574461 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2"} err="failed to get container status \"22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2\": rpc error: code = NotFound desc = could not find container \"22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2\": container with ID starting with 22225c33c44f841c74f0aadce772af3ce02b8b63f27ed7d63a5dd43f89748fd2 not found: ID does not exist" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.574472 4871 scope.go:117] "RemoveContainer" containerID="9973055d330c1a9be0e6bdbc241d97058a9ac200bb99c0f430d88fee2dc44843" Nov 26 06:22:10 crc kubenswrapper[4871]: E1126 06:22:10.574801 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9973055d330c1a9be0e6bdbc241d97058a9ac200bb99c0f430d88fee2dc44843\": container with ID starting with 9973055d330c1a9be0e6bdbc241d97058a9ac200bb99c0f430d88fee2dc44843 not found: ID does not exist" containerID="9973055d330c1a9be0e6bdbc241d97058a9ac200bb99c0f430d88fee2dc44843" Nov 26 06:22:10 crc kubenswrapper[4871]: I1126 06:22:10.574821 4871 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9973055d330c1a9be0e6bdbc241d97058a9ac200bb99c0f430d88fee2dc44843"} err="failed to get container status \"9973055d330c1a9be0e6bdbc241d97058a9ac200bb99c0f430d88fee2dc44843\": rpc error: code = NotFound desc = could not find container \"9973055d330c1a9be0e6bdbc241d97058a9ac200bb99c0f430d88fee2dc44843\": container with ID starting with 9973055d330c1a9be0e6bdbc241d97058a9ac200bb99c0f430d88fee2dc44843 not found: ID does not exist" Nov 26 06:22:14 crc kubenswrapper[4871]: I1126 06:22:14.968961 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:22:17 crc kubenswrapper[4871]: I1126 06:22:17.386173 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:22:17 crc kubenswrapper[4871]: I1126 06:22:17.450965 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:22:18 crc kubenswrapper[4871]: I1126 06:22:18.774799 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2hgth"] Nov 26 06:22:18 crc kubenswrapper[4871]: I1126 06:22:18.775752 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2hgth" podUID="88176307-a2eb-4a84-9abb-f88ee3635a6b" containerName="registry-server" containerID="cri-o://78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c" gracePeriod=2 Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.297865 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.421878 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-catalog-content\") pod \"88176307-a2eb-4a84-9abb-f88ee3635a6b\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.422050 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g875t\" (UniqueName: \"kubernetes.io/projected/88176307-a2eb-4a84-9abb-f88ee3635a6b-kube-api-access-g875t\") pod \"88176307-a2eb-4a84-9abb-f88ee3635a6b\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.422087 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-utilities\") pod \"88176307-a2eb-4a84-9abb-f88ee3635a6b\" (UID: \"88176307-a2eb-4a84-9abb-f88ee3635a6b\") " Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.423195 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-utilities" (OuterVolumeSpecName: "utilities") pod "88176307-a2eb-4a84-9abb-f88ee3635a6b" (UID: "88176307-a2eb-4a84-9abb-f88ee3635a6b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.434434 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88176307-a2eb-4a84-9abb-f88ee3635a6b-kube-api-access-g875t" (OuterVolumeSpecName: "kube-api-access-g875t") pod "88176307-a2eb-4a84-9abb-f88ee3635a6b" (UID: "88176307-a2eb-4a84-9abb-f88ee3635a6b"). InnerVolumeSpecName "kube-api-access-g875t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.469998 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "88176307-a2eb-4a84-9abb-f88ee3635a6b" (UID: "88176307-a2eb-4a84-9abb-f88ee3635a6b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.525011 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.525280 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g875t\" (UniqueName: \"kubernetes.io/projected/88176307-a2eb-4a84-9abb-f88ee3635a6b-kube-api-access-g875t\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.525291 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88176307-a2eb-4a84-9abb-f88ee3635a6b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.567270 4871 generic.go:334] "Generic (PLEG): container finished" podID="88176307-a2eb-4a84-9abb-f88ee3635a6b" containerID="78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c" exitCode=0 Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.567318 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2hgth" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.567322 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hgth" event={"ID":"88176307-a2eb-4a84-9abb-f88ee3635a6b","Type":"ContainerDied","Data":"78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c"} Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.567353 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hgth" event={"ID":"88176307-a2eb-4a84-9abb-f88ee3635a6b","Type":"ContainerDied","Data":"9509c5fbf057c6e6191182366f4e941cd69bf895c56664c5cbf145ba9ab67eba"} Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.567372 4871 scope.go:117] "RemoveContainer" containerID="78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.596747 4871 scope.go:117] "RemoveContainer" containerID="57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.605716 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2hgth"] Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.614204 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2hgth"] Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.620901 4871 scope.go:117] "RemoveContainer" containerID="0f14fe1839b8b92dbe85ddbc589ba679b24fff4b6eb14fed86b937eae709352e" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.694006 4871 scope.go:117] "RemoveContainer" containerID="78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c" Nov 26 06:22:19 crc kubenswrapper[4871]: E1126 06:22:19.694495 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c\": container with ID starting with 78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c not found: ID does not exist" containerID="78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.694566 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c"} err="failed to get container status \"78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c\": rpc error: code = NotFound desc = could not find container \"78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c\": container with ID starting with 78a4c698fa67a95f9d889e77c05d4f9102f2d6958903c30fcb29b612bf25f32c not found: ID does not exist" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.694611 4871 scope.go:117] "RemoveContainer" containerID="57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59" Nov 26 06:22:19 crc kubenswrapper[4871]: E1126 06:22:19.694904 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59\": container with ID starting with 57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59 not found: ID does not exist" containerID="57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.694934 4871 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59"} err="failed to get container status \"57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59\": rpc error: code = NotFound desc = could not find container \"57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59\": container with ID starting with 57b82c052aa925aff298890ea840bac1361b95a9b7ae29c05a4207999c439d59 not found: ID does not exist" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.694955 4871 scope.go:117] "RemoveContainer" containerID="0f14fe1839b8b92dbe85ddbc589ba679b24fff4b6eb14fed86b937eae709352e" Nov 26 06:22:19 crc kubenswrapper[4871]: E1126 06:22:19.695153 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f14fe1839b8b92dbe85ddbc589ba679b24fff4b6eb14fed86b937eae709352e\": container with ID starting with 0f14fe1839b8b92dbe85ddbc589ba679b24fff4b6eb14fed86b937eae709352e not found: ID does not exist" containerID="0f14fe1839b8b92dbe85ddbc589ba679b24fff4b6eb14fed86b937eae709352e" Nov 26 06:22:19 crc kubenswrapper[4871]: I1126 06:22:19.695182 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f14fe1839b8b92dbe85ddbc589ba679b24fff4b6eb14fed86b937eae709352e"} err="failed to get container status \"0f14fe1839b8b92dbe85ddbc589ba679b24fff4b6eb14fed86b937eae709352e\": rpc error: code = NotFound desc = could not find container \"0f14fe1839b8b92dbe85ddbc589ba679b24fff4b6eb14fed86b937eae709352e\": container with ID starting with 0f14fe1839b8b92dbe85ddbc589ba679b24fff4b6eb14fed86b937eae709352e not found: ID does not exist" Nov 26 06:22:20 crc kubenswrapper[4871]: I1126 06:22:20.520870 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88176307-a2eb-4a84-9abb-f88ee3635a6b" path="/var/lib/kubelet/pods/88176307-a2eb-4a84-9abb-f88ee3635a6b/volumes" Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.178093 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z9x8c"] Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.178788 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-z9x8c" podUID="5dca197c-9641-4f53-8249-38842a736ac0" containerName="registry-server" containerID="cri-o://7758762dd38df70bfc7bae82a0faa8bbae003c5c80e2b48cd0612bae278e0598" gracePeriod=2 Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.612942 4871 generic.go:334] "Generic (PLEG): container finished" podID="5dca197c-9641-4f53-8249-38842a736ac0" containerID="7758762dd38df70bfc7bae82a0faa8bbae003c5c80e2b48cd0612bae278e0598" exitCode=0 Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.613035 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z9x8c" event={"ID":"5dca197c-9641-4f53-8249-38842a736ac0","Type":"ContainerDied","Data":"7758762dd38df70bfc7bae82a0faa8bbae003c5c80e2b48cd0612bae278e0598"} Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.736180 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.874854 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-catalog-content\") pod \"5dca197c-9641-4f53-8249-38842a736ac0\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.874961 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-utilities\") pod \"5dca197c-9641-4f53-8249-38842a736ac0\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.875036 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55qh6\" (UniqueName: \"kubernetes.io/projected/5dca197c-9641-4f53-8249-38842a736ac0-kube-api-access-55qh6\") pod \"5dca197c-9641-4f53-8249-38842a736ac0\" (UID: \"5dca197c-9641-4f53-8249-38842a736ac0\") " Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.877485 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-utilities" (OuterVolumeSpecName: "utilities") pod "5dca197c-9641-4f53-8249-38842a736ac0" (UID: "5dca197c-9641-4f53-8249-38842a736ac0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.884609 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dca197c-9641-4f53-8249-38842a736ac0-kube-api-access-55qh6" (OuterVolumeSpecName: "kube-api-access-55qh6") pod "5dca197c-9641-4f53-8249-38842a736ac0" (UID: "5dca197c-9641-4f53-8249-38842a736ac0"). InnerVolumeSpecName "kube-api-access-55qh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.956749 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5dca197c-9641-4f53-8249-38842a736ac0" (UID: "5dca197c-9641-4f53-8249-38842a736ac0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.977190 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.977226 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55qh6\" (UniqueName: \"kubernetes.io/projected/5dca197c-9641-4f53-8249-38842a736ac0-kube-api-access-55qh6\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:21 crc kubenswrapper[4871]: I1126 06:22:21.977238 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dca197c-9641-4f53-8249-38842a736ac0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:22:22 crc kubenswrapper[4871]: I1126 06:22:22.630123 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z9x8c" event={"ID":"5dca197c-9641-4f53-8249-38842a736ac0","Type":"ContainerDied","Data":"458c4b2dee3a32f6b7f06b26748ffc4a59707b32dfa5b25d4a20930eac22195a"} Nov 26 06:22:22 crc kubenswrapper[4871]: I1126 06:22:22.630641 4871 scope.go:117] "RemoveContainer" containerID="7758762dd38df70bfc7bae82a0faa8bbae003c5c80e2b48cd0612bae278e0598" Nov 26 06:22:22 crc kubenswrapper[4871]: I1126 06:22:22.630346 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z9x8c" Nov 26 06:22:22 crc kubenswrapper[4871]: I1126 06:22:22.666575 4871 scope.go:117] "RemoveContainer" containerID="50bcc68677eb912418fa3c49d33cceac99f75002fe7fd69869137d5396ffe688" Nov 26 06:22:22 crc kubenswrapper[4871]: I1126 06:22:22.689654 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z9x8c"] Nov 26 06:22:22 crc kubenswrapper[4871]: I1126 06:22:22.705691 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-z9x8c"] Nov 26 06:22:22 crc kubenswrapper[4871]: I1126 06:22:22.708074 4871 scope.go:117] "RemoveContainer" containerID="b4420c1359669380853fa675ac53c9580b17064e897bbc38e746b77c5e79733a" Nov 26 06:22:23 crc kubenswrapper[4871]: I1126 06:22:23.614947 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:22:23 crc kubenswrapper[4871]: I1126 06:22:23.615506 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:22:24 crc kubenswrapper[4871]: I1126 06:22:24.527314 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dca197c-9641-4f53-8249-38842a736ac0" path="/var/lib/kubelet/pods/5dca197c-9641-4f53-8249-38842a736ac0/volumes" Nov 26 06:22:53 crc kubenswrapper[4871]: I1126 06:22:53.614613 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:22:53 crc kubenswrapper[4871]: I1126 06:22:53.615287 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:23:23 crc kubenswrapper[4871]: I1126 06:23:23.615430 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:23:23 crc kubenswrapper[4871]: I1126 06:23:23.615944 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:23:23 crc kubenswrapper[4871]: I1126 06:23:23.615993 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 06:23:23 crc kubenswrapper[4871]: I1126 06:23:23.616907 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2b7f2e346520824b3867687e2ae756bb9159d439cbe88ac2c9c598abefcaaaba"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 06:23:23 crc kubenswrapper[4871]: I1126 06:23:23.616986 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://2b7f2e346520824b3867687e2ae756bb9159d439cbe88ac2c9c598abefcaaaba" gracePeriod=600 Nov 26 06:23:24 crc kubenswrapper[4871]: I1126 06:23:24.365787 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="2b7f2e346520824b3867687e2ae756bb9159d439cbe88ac2c9c598abefcaaaba" exitCode=0 Nov 26 06:23:24 crc kubenswrapper[4871]: I1126 06:23:24.365868 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"2b7f2e346520824b3867687e2ae756bb9159d439cbe88ac2c9c598abefcaaaba"} Nov 26 06:23:24 crc kubenswrapper[4871]: I1126 06:23:24.366732 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2"} Nov 26 06:23:24 crc kubenswrapper[4871]: I1126 06:23:24.366772 4871 scope.go:117] "RemoveContainer" containerID="e845e7ebd5271d932ee660c9660f3a5a4cb8d826d1281db0245fb3669652a57f" Nov 26 06:24:19 crc kubenswrapper[4871]: E1126 06:24:19.498030 4871 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.44:60356->38.102.83.44:38809: write tcp 
38.102.83.44:60356->38.102.83.44:38809: write: broken pipe Nov 26 06:25:23 crc kubenswrapper[4871]: I1126 06:25:23.614978 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:25:23 crc kubenswrapper[4871]: I1126 06:25:23.615567 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:25:53 crc kubenswrapper[4871]: I1126 06:25:53.615159 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:25:53 crc kubenswrapper[4871]: I1126 06:25:53.615592 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:26:23 crc kubenswrapper[4871]: I1126 06:26:23.615038 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:26:23 crc kubenswrapper[4871]: I1126 06:26:23.615826 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:26:23 crc kubenswrapper[4871]: I1126 06:26:23.615909 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 06:26:23 crc kubenswrapper[4871]: I1126 06:26:23.617053 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 06:26:23 crc kubenswrapper[4871]: I1126 06:26:23.617163 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" gracePeriod=600 Nov 26 06:26:23 crc kubenswrapper[4871]: E1126 06:26:23.743872 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:26:24 crc kubenswrapper[4871]: I1126 06:26:24.443301 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" exitCode=0 Nov 26 06:26:24 crc kubenswrapper[4871]: I1126 06:26:24.443377 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2"} Nov 26 06:26:24 crc kubenswrapper[4871]: I1126 06:26:24.443635 4871 scope.go:117] "RemoveContainer" containerID="2b7f2e346520824b3867687e2ae756bb9159d439cbe88ac2c9c598abefcaaaba" Nov 26 06:26:24 crc kubenswrapper[4871]: I1126 06:26:24.444323 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:26:24 crc kubenswrapper[4871]: E1126 06:26:24.444643 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:26:39 crc kubenswrapper[4871]: I1126 06:26:39.507167 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:26:39 crc kubenswrapper[4871]: E1126 06:26:39.508087 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:26:52 crc kubenswrapper[4871]: I1126 06:26:52.513724 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:26:52 crc kubenswrapper[4871]: E1126 06:26:52.514413 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:27:04 crc kubenswrapper[4871]: I1126 06:27:04.507177 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:27:04 crc kubenswrapper[4871]: E1126 06:27:04.508199 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:27:18 crc kubenswrapper[4871]: I1126 06:27:18.510313 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:27:18 crc kubenswrapper[4871]: E1126 06:27:18.511411 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:27:31 crc kubenswrapper[4871]: I1126 06:27:31.508027 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:27:31 crc kubenswrapper[4871]: E1126 06:27:31.508846 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:27:44 crc kubenswrapper[4871]: I1126 06:27:44.507954 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:27:44 crc kubenswrapper[4871]: E1126 06:27:44.508747 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:27:56 crc kubenswrapper[4871]: I1126 06:27:56.507499 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:27:56 crc kubenswrapper[4871]: E1126 06:27:56.508440 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:28:08 crc kubenswrapper[4871]: I1126 06:28:08.507512 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:28:08 crc kubenswrapper[4871]: E1126 06:28:08.508425 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" 
podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:28:20 crc kubenswrapper[4871]: I1126 06:28:20.508270 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:28:20 crc kubenswrapper[4871]: E1126 06:28:20.509099 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:28:31 crc kubenswrapper[4871]: I1126 06:28:31.507458 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:28:31 crc kubenswrapper[4871]: E1126 06:28:31.508166 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:28:38 crc kubenswrapper[4871]: I1126 06:28:38.037663 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-78dd8485c9-fx6sv" podUID="fcca2594-c385-49cd-8354-7e4fcfab96c8" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 26 06:28:45 crc kubenswrapper[4871]: I1126 06:28:45.507965 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:28:45 crc kubenswrapper[4871]: E1126 06:28:45.508781 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:28:57 crc kubenswrapper[4871]: I1126 06:28:57.507798 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:28:57 crc kubenswrapper[4871]: E1126 06:28:57.508561 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:29:08 crc kubenswrapper[4871]: I1126 06:29:08.507212 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:29:08 crc kubenswrapper[4871]: E1126 06:29:08.508353 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:29:23 crc kubenswrapper[4871]: I1126 06:29:23.509120 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:29:23 crc kubenswrapper[4871]: E1126 06:29:23.511932 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:29:38 crc kubenswrapper[4871]: I1126 06:29:38.506855 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:29:38 crc kubenswrapper[4871]: E1126 06:29:38.507617 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:29:52 crc kubenswrapper[4871]: I1126 06:29:52.516117 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:29:52 crc kubenswrapper[4871]: E1126 06:29:52.517031 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.175779 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f"] Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177113 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88176307-a2eb-4a84-9abb-f88ee3635a6b" containerName="extract-content" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177136 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="88176307-a2eb-4a84-9abb-f88ee3635a6b" containerName="extract-content" Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177155 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dca197c-9641-4f53-8249-38842a736ac0" containerName="extract-utilities" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177165 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dca197c-9641-4f53-8249-38842a736ac0" containerName="extract-utilities" Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177194 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88176307-a2eb-4a84-9abb-f88ee3635a6b" containerName="extract-utilities" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177203 4871 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="88176307-a2eb-4a84-9abb-f88ee3635a6b" containerName="extract-utilities" Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177217 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" containerName="extract-utilities" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177225 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" containerName="extract-utilities" Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177249 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88176307-a2eb-4a84-9abb-f88ee3635a6b" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177260 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="88176307-a2eb-4a84-9abb-f88ee3635a6b" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177278 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62e90694-5b6e-467a-908a-35ba613a1f08" containerName="extract-utilities" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177288 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="62e90694-5b6e-467a-908a-35ba613a1f08" containerName="extract-utilities" Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177312 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177323 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177344 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62e90694-5b6e-467a-908a-35ba613a1f08" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177357 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="62e90694-5b6e-467a-908a-35ba613a1f08" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177375 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dca197c-9641-4f53-8249-38842a736ac0" containerName="extract-content" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177385 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dca197c-9641-4f53-8249-38842a736ac0" containerName="extract-content" Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177402 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dca197c-9641-4f53-8249-38842a736ac0" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177410 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dca197c-9641-4f53-8249-38842a736ac0" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177425 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62e90694-5b6e-467a-908a-35ba613a1f08" containerName="extract-content" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177435 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="62e90694-5b6e-467a-908a-35ba613a1f08" containerName="extract-content" Nov 26 06:30:00 crc kubenswrapper[4871]: E1126 06:30:00.177451 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" containerName="extract-content" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177459 4871 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" containerName="extract-content" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177788 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="88176307-a2eb-4a84-9abb-f88ee3635a6b" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177819 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae7307ba-fd30-45fd-9d67-b7a3688dcdb2" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177837 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dca197c-9641-4f53-8249-38842a736ac0" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.177871 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="62e90694-5b6e-467a-908a-35ba613a1f08" containerName="registry-server" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.178998 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.182783 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.183156 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.189095 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f"] Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.273966 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08e392e2-c8f5-4c76-baa8-8633938e5d7c-secret-volume\") pod \"collect-profiles-29402310-57b4f\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.274059 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08e392e2-c8f5-4c76-baa8-8633938e5d7c-config-volume\") pod \"collect-profiles-29402310-57b4f\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.274619 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2mvz\" (UniqueName: \"kubernetes.io/projected/08e392e2-c8f5-4c76-baa8-8633938e5d7c-kube-api-access-t2mvz\") pod \"collect-profiles-29402310-57b4f\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.376510 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08e392e2-c8f5-4c76-baa8-8633938e5d7c-config-volume\") pod \"collect-profiles-29402310-57b4f\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.376696 4871 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2mvz\" (UniqueName: \"kubernetes.io/projected/08e392e2-c8f5-4c76-baa8-8633938e5d7c-kube-api-access-t2mvz\") pod \"collect-profiles-29402310-57b4f\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.376743 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08e392e2-c8f5-4c76-baa8-8633938e5d7c-secret-volume\") pod \"collect-profiles-29402310-57b4f\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.378035 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08e392e2-c8f5-4c76-baa8-8633938e5d7c-config-volume\") pod \"collect-profiles-29402310-57b4f\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.382890 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08e392e2-c8f5-4c76-baa8-8633938e5d7c-secret-volume\") pod \"collect-profiles-29402310-57b4f\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.402592 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2mvz\" (UniqueName: \"kubernetes.io/projected/08e392e2-c8f5-4c76-baa8-8633938e5d7c-kube-api-access-t2mvz\") pod \"collect-profiles-29402310-57b4f\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.506771 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.966095 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f"] Nov 26 06:30:00 crc kubenswrapper[4871]: I1126 06:30:00.978605 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" event={"ID":"08e392e2-c8f5-4c76-baa8-8633938e5d7c","Type":"ContainerStarted","Data":"907e6322cd7d63c2fe6823b063af5c68a78270d0bb396b17049466f9393fc44e"} Nov 26 06:30:01 crc kubenswrapper[4871]: I1126 06:30:01.992977 4871 generic.go:334] "Generic (PLEG): container finished" podID="08e392e2-c8f5-4c76-baa8-8633938e5d7c" containerID="54528f430ef7b4772634511e05ce031a27c77f6d8e3641c745c8e9ca28f36672" exitCode=0 Nov 26 06:30:01 crc kubenswrapper[4871]: I1126 06:30:01.993075 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" event={"ID":"08e392e2-c8f5-4c76-baa8-8633938e5d7c","Type":"ContainerDied","Data":"54528f430ef7b4772634511e05ce031a27c77f6d8e3641c745c8e9ca28f36672"} Nov 26 06:30:03 crc kubenswrapper[4871]: I1126 06:30:03.356754 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:03 crc kubenswrapper[4871]: I1126 06:30:03.440031 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2mvz\" (UniqueName: \"kubernetes.io/projected/08e392e2-c8f5-4c76-baa8-8633938e5d7c-kube-api-access-t2mvz\") pod \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " Nov 26 06:30:03 crc kubenswrapper[4871]: I1126 06:30:03.440449 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08e392e2-c8f5-4c76-baa8-8633938e5d7c-config-volume\") pod \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " Nov 26 06:30:03 crc kubenswrapper[4871]: I1126 06:30:03.440768 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08e392e2-c8f5-4c76-baa8-8633938e5d7c-secret-volume\") pod \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\" (UID: \"08e392e2-c8f5-4c76-baa8-8633938e5d7c\") " Nov 26 06:30:03 crc kubenswrapper[4871]: I1126 06:30:03.441149 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08e392e2-c8f5-4c76-baa8-8633938e5d7c-config-volume" (OuterVolumeSpecName: "config-volume") pod "08e392e2-c8f5-4c76-baa8-8633938e5d7c" (UID: "08e392e2-c8f5-4c76-baa8-8633938e5d7c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:30:03 crc kubenswrapper[4871]: I1126 06:30:03.441479 4871 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/08e392e2-c8f5-4c76-baa8-8633938e5d7c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:30:03 crc kubenswrapper[4871]: I1126 06:30:03.445993 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08e392e2-c8f5-4c76-baa8-8633938e5d7c-kube-api-access-t2mvz" (OuterVolumeSpecName: "kube-api-access-t2mvz") pod "08e392e2-c8f5-4c76-baa8-8633938e5d7c" (UID: "08e392e2-c8f5-4c76-baa8-8633938e5d7c"). InnerVolumeSpecName "kube-api-access-t2mvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:30:03 crc kubenswrapper[4871]: I1126 06:30:03.446638 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08e392e2-c8f5-4c76-baa8-8633938e5d7c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "08e392e2-c8f5-4c76-baa8-8633938e5d7c" (UID: "08e392e2-c8f5-4c76-baa8-8633938e5d7c"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:30:03 crc kubenswrapper[4871]: I1126 06:30:03.543640 4871 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/08e392e2-c8f5-4c76-baa8-8633938e5d7c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:30:03 crc kubenswrapper[4871]: I1126 06:30:03.543672 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2mvz\" (UniqueName: \"kubernetes.io/projected/08e392e2-c8f5-4c76-baa8-8633938e5d7c-kube-api-access-t2mvz\") on node \"crc\" DevicePath \"\"" Nov 26 06:30:04 crc kubenswrapper[4871]: I1126 06:30:04.016647 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" event={"ID":"08e392e2-c8f5-4c76-baa8-8633938e5d7c","Type":"ContainerDied","Data":"907e6322cd7d63c2fe6823b063af5c68a78270d0bb396b17049466f9393fc44e"} Nov 26 06:30:04 crc kubenswrapper[4871]: I1126 06:30:04.017599 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="907e6322cd7d63c2fe6823b063af5c68a78270d0bb396b17049466f9393fc44e" Nov 26 06:30:04 crc kubenswrapper[4871]: I1126 06:30:04.016686 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402310-57b4f" Nov 26 06:30:04 crc kubenswrapper[4871]: I1126 06:30:04.437302 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq"] Nov 26 06:30:04 crc kubenswrapper[4871]: I1126 06:30:04.446535 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402265-nhkhq"] Nov 26 06:30:04 crc kubenswrapper[4871]: I1126 06:30:04.519499 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="149882f2-ae3b-4571-a1ad-cbed765c2c77" path="/var/lib/kubelet/pods/149882f2-ae3b-4571-a1ad-cbed765c2c77/volumes" Nov 26 06:30:06 crc kubenswrapper[4871]: I1126 06:30:06.508675 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:30:06 crc kubenswrapper[4871]: E1126 06:30:06.509151 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:30:19 crc kubenswrapper[4871]: I1126 06:30:19.507571 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:30:19 crc kubenswrapper[4871]: E1126 06:30:19.508974 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:30:23 crc kubenswrapper[4871]: I1126 06:30:23.914929 4871 scope.go:117] "RemoveContainer" containerID="d053714db1665e915875858ece0e68e89ae5c5aea55a9824e7f07301df0b02dd" Nov 26 06:30:32 
crc kubenswrapper[4871]: I1126 06:30:32.516924 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:30:32 crc kubenswrapper[4871]: E1126 06:30:32.517813 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:30:43 crc kubenswrapper[4871]: I1126 06:30:43.507100 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:30:43 crc kubenswrapper[4871]: E1126 06:30:43.508056 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:30:57 crc kubenswrapper[4871]: I1126 06:30:57.508059 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:30:57 crc kubenswrapper[4871]: E1126 06:30:57.508841 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:31:09 crc kubenswrapper[4871]: I1126 06:31:09.507292 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:31:09 crc kubenswrapper[4871]: E1126 06:31:09.508207 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:31:22 crc kubenswrapper[4871]: I1126 06:31:22.514630 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:31:22 crc kubenswrapper[4871]: E1126 06:31:22.515477 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:31:34 crc kubenswrapper[4871]: I1126 06:31:34.507218 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:31:35 crc 
kubenswrapper[4871]: I1126 06:31:35.073856 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"82a89614896222fd0545f77def74dd81122d48e02666ae4d8a09ea6872bbf47d"} Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.004007 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-v2g4f"] Nov 26 06:32:12 crc kubenswrapper[4871]: E1126 06:32:12.005437 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e392e2-c8f5-4c76-baa8-8633938e5d7c" containerName="collect-profiles" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.005455 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e392e2-c8f5-4c76-baa8-8633938e5d7c" containerName="collect-profiles" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.005804 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e392e2-c8f5-4c76-baa8-8633938e5d7c" containerName="collect-profiles" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.007609 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.034319 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v2g4f"] Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.191687 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vq7w2\" (UniqueName: \"kubernetes.io/projected/423f0096-43c5-40ff-b858-da9192d3d136-kube-api-access-vq7w2\") pod \"redhat-marketplace-v2g4f\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.191760 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-catalog-content\") pod \"redhat-marketplace-v2g4f\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.191914 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-utilities\") pod \"redhat-marketplace-v2g4f\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.294020 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-utilities\") pod \"redhat-marketplace-v2g4f\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.294065 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vq7w2\" (UniqueName: \"kubernetes.io/projected/423f0096-43c5-40ff-b858-da9192d3d136-kube-api-access-vq7w2\") pod \"redhat-marketplace-v2g4f\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.294471 4871 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-utilities\") pod \"redhat-marketplace-v2g4f\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.294581 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-catalog-content\") pod \"redhat-marketplace-v2g4f\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.295013 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-catalog-content\") pod \"redhat-marketplace-v2g4f\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.318194 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vq7w2\" (UniqueName: \"kubernetes.io/projected/423f0096-43c5-40ff-b858-da9192d3d136-kube-api-access-vq7w2\") pod \"redhat-marketplace-v2g4f\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.365798 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:12 crc kubenswrapper[4871]: I1126 06:32:12.893344 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v2g4f"] Nov 26 06:32:13 crc kubenswrapper[4871]: I1126 06:32:13.551736 4871 generic.go:334] "Generic (PLEG): container finished" podID="423f0096-43c5-40ff-b858-da9192d3d136" containerID="cdf6e84105c17ac25025b254337c6e2e2e4bfc81ce6311231a2af9b83f72e8fc" exitCode=0 Nov 26 06:32:13 crc kubenswrapper[4871]: I1126 06:32:13.551782 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2g4f" event={"ID":"423f0096-43c5-40ff-b858-da9192d3d136","Type":"ContainerDied","Data":"cdf6e84105c17ac25025b254337c6e2e2e4bfc81ce6311231a2af9b83f72e8fc"} Nov 26 06:32:13 crc kubenswrapper[4871]: I1126 06:32:13.551978 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2g4f" event={"ID":"423f0096-43c5-40ff-b858-da9192d3d136","Type":"ContainerStarted","Data":"7b6c43a8c4e50dca5ac753950b91dec6bde82b8352ada8d634ed55de6ce61f8d"} Nov 26 06:32:13 crc kubenswrapper[4871]: I1126 06:32:13.554806 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 06:32:14 crc kubenswrapper[4871]: I1126 06:32:14.566079 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2g4f" event={"ID":"423f0096-43c5-40ff-b858-da9192d3d136","Type":"ContainerStarted","Data":"543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49"} Nov 26 06:32:15 crc kubenswrapper[4871]: I1126 06:32:15.579007 4871 generic.go:334] "Generic (PLEG): container finished" podID="423f0096-43c5-40ff-b858-da9192d3d136" containerID="543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49" exitCode=0 Nov 26 06:32:15 crc kubenswrapper[4871]: I1126 
06:32:15.579103 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2g4f" event={"ID":"423f0096-43c5-40ff-b858-da9192d3d136","Type":"ContainerDied","Data":"543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49"} Nov 26 06:32:16 crc kubenswrapper[4871]: I1126 06:32:16.594931 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2g4f" event={"ID":"423f0096-43c5-40ff-b858-da9192d3d136","Type":"ContainerStarted","Data":"7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f"} Nov 26 06:32:16 crc kubenswrapper[4871]: I1126 06:32:16.625662 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-v2g4f" podStartSLOduration=3.117050393 podStartE2EDuration="5.625639214s" podCreationTimestamp="2025-11-26 06:32:11 +0000 UTC" firstStartedPulling="2025-11-26 06:32:13.554499527 +0000 UTC m=+3991.737551133" lastFinishedPulling="2025-11-26 06:32:16.063088358 +0000 UTC m=+3994.246139954" observedRunningTime="2025-11-26 06:32:16.616813185 +0000 UTC m=+3994.799864801" watchObservedRunningTime="2025-11-26 06:32:16.625639214 +0000 UTC m=+3994.808690800" Nov 26 06:32:16 crc kubenswrapper[4871]: I1126 06:32:16.986199 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4kmfq"] Nov 26 06:32:16 crc kubenswrapper[4871]: I1126 06:32:16.988858 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:17 crc kubenswrapper[4871]: I1126 06:32:17.006766 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4kmfq"] Nov 26 06:32:17 crc kubenswrapper[4871]: I1126 06:32:17.110379 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-utilities\") pod \"certified-operators-4kmfq\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:17 crc kubenswrapper[4871]: I1126 06:32:17.110848 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbrkn\" (UniqueName: \"kubernetes.io/projected/ed46157d-a3f5-42ad-8a23-f2bc83295a78-kube-api-access-vbrkn\") pod \"certified-operators-4kmfq\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:17 crc kubenswrapper[4871]: I1126 06:32:17.110882 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-catalog-content\") pod \"certified-operators-4kmfq\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:17 crc kubenswrapper[4871]: I1126 06:32:17.211796 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-utilities\") pod \"certified-operators-4kmfq\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:17 crc kubenswrapper[4871]: I1126 06:32:17.211868 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-vbrkn\" (UniqueName: \"kubernetes.io/projected/ed46157d-a3f5-42ad-8a23-f2bc83295a78-kube-api-access-vbrkn\") pod \"certified-operators-4kmfq\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:17 crc kubenswrapper[4871]: I1126 06:32:17.211889 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-catalog-content\") pod \"certified-operators-4kmfq\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:17 crc kubenswrapper[4871]: I1126 06:32:17.212272 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-catalog-content\") pod \"certified-operators-4kmfq\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:17 crc kubenswrapper[4871]: I1126 06:32:17.212489 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-utilities\") pod \"certified-operators-4kmfq\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:17 crc kubenswrapper[4871]: I1126 06:32:17.231953 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbrkn\" (UniqueName: \"kubernetes.io/projected/ed46157d-a3f5-42ad-8a23-f2bc83295a78-kube-api-access-vbrkn\") pod \"certified-operators-4kmfq\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:17 crc kubenswrapper[4871]: I1126 06:32:17.311775 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:18 crc kubenswrapper[4871]: I1126 06:32:18.068848 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4kmfq"] Nov 26 06:32:18 crc kubenswrapper[4871]: I1126 06:32:18.612109 4871 generic.go:334] "Generic (PLEG): container finished" podID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" containerID="a652961186a2e5162f723a7eb7f41a5b69796b80c7a2984f9d512e68e40c0c84" exitCode=0 Nov 26 06:32:18 crc kubenswrapper[4871]: I1126 06:32:18.612215 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kmfq" event={"ID":"ed46157d-a3f5-42ad-8a23-f2bc83295a78","Type":"ContainerDied","Data":"a652961186a2e5162f723a7eb7f41a5b69796b80c7a2984f9d512e68e40c0c84"} Nov 26 06:32:18 crc kubenswrapper[4871]: I1126 06:32:18.612439 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kmfq" event={"ID":"ed46157d-a3f5-42ad-8a23-f2bc83295a78","Type":"ContainerStarted","Data":"a0373e9c1e56a12524ff96c12e8b71bededadd0ad815bd605915b01e0fbf37a8"} Nov 26 06:32:20 crc kubenswrapper[4871]: I1126 06:32:20.630627 4871 generic.go:334] "Generic (PLEG): container finished" podID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" containerID="a8387fe51d18a6e4f9245fa88c6301ba75f1f66615b2ab9c94d4adbd9cb21e6c" exitCode=0 Nov 26 06:32:20 crc kubenswrapper[4871]: I1126 06:32:20.630721 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kmfq" event={"ID":"ed46157d-a3f5-42ad-8a23-f2bc83295a78","Type":"ContainerDied","Data":"a8387fe51d18a6e4f9245fa88c6301ba75f1f66615b2ab9c94d4adbd9cb21e6c"} Nov 26 06:32:21 crc kubenswrapper[4871]: I1126 06:32:21.643074 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kmfq" event={"ID":"ed46157d-a3f5-42ad-8a23-f2bc83295a78","Type":"ContainerStarted","Data":"dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c"} Nov 26 06:32:21 crc kubenswrapper[4871]: I1126 06:32:21.661958 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4kmfq" podStartSLOduration=3.020440572 podStartE2EDuration="5.661941339s" podCreationTimestamp="2025-11-26 06:32:16 +0000 UTC" firstStartedPulling="2025-11-26 06:32:18.613926773 +0000 UTC m=+3996.796978359" lastFinishedPulling="2025-11-26 06:32:21.2554275 +0000 UTC m=+3999.438479126" observedRunningTime="2025-11-26 06:32:21.661510949 +0000 UTC m=+3999.844562545" watchObservedRunningTime="2025-11-26 06:32:21.661941339 +0000 UTC m=+3999.844992935" Nov 26 06:32:22 crc kubenswrapper[4871]: I1126 06:32:22.366902 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:22 crc kubenswrapper[4871]: I1126 06:32:22.367142 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:22 crc kubenswrapper[4871]: I1126 06:32:22.436797 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:23 crc kubenswrapper[4871]: I1126 06:32:23.296983 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:23 crc kubenswrapper[4871]: I1126 06:32:23.561787 4871 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-marketplace/redhat-marketplace-v2g4f"] Nov 26 06:32:24 crc kubenswrapper[4871]: I1126 06:32:24.671816 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-v2g4f" podUID="423f0096-43c5-40ff-b858-da9192d3d136" containerName="registry-server" containerID="cri-o://7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f" gracePeriod=2 Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.446369 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.593254 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-utilities\") pod \"423f0096-43c5-40ff-b858-da9192d3d136\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.593416 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-catalog-content\") pod \"423f0096-43c5-40ff-b858-da9192d3d136\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.593597 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vq7w2\" (UniqueName: \"kubernetes.io/projected/423f0096-43c5-40ff-b858-da9192d3d136-kube-api-access-vq7w2\") pod \"423f0096-43c5-40ff-b858-da9192d3d136\" (UID: \"423f0096-43c5-40ff-b858-da9192d3d136\") " Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.594652 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-utilities" (OuterVolumeSpecName: "utilities") pod "423f0096-43c5-40ff-b858-da9192d3d136" (UID: "423f0096-43c5-40ff-b858-da9192d3d136"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.594805 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.610268 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "423f0096-43c5-40ff-b858-da9192d3d136" (UID: "423f0096-43c5-40ff-b858-da9192d3d136"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.657923 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/423f0096-43c5-40ff-b858-da9192d3d136-kube-api-access-vq7w2" (OuterVolumeSpecName: "kube-api-access-vq7w2") pod "423f0096-43c5-40ff-b858-da9192d3d136" (UID: "423f0096-43c5-40ff-b858-da9192d3d136"). InnerVolumeSpecName "kube-api-access-vq7w2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.681091 4871 generic.go:334] "Generic (PLEG): container finished" podID="423f0096-43c5-40ff-b858-da9192d3d136" containerID="7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f" exitCode=0 Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.681143 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2g4f" event={"ID":"423f0096-43c5-40ff-b858-da9192d3d136","Type":"ContainerDied","Data":"7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f"} Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.681171 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2g4f" event={"ID":"423f0096-43c5-40ff-b858-da9192d3d136","Type":"ContainerDied","Data":"7b6c43a8c4e50dca5ac753950b91dec6bde82b8352ada8d634ed55de6ce61f8d"} Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.681187 4871 scope.go:117] "RemoveContainer" containerID="7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.681337 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v2g4f" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.696754 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vq7w2\" (UniqueName: \"kubernetes.io/projected/423f0096-43c5-40ff-b858-da9192d3d136-kube-api-access-vq7w2\") on node \"crc\" DevicePath \"\"" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.696788 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/423f0096-43c5-40ff-b858-da9192d3d136-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.722172 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-v2g4f"] Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.724701 4871 scope.go:117] "RemoveContainer" containerID="543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.734606 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-v2g4f"] Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.748183 4871 scope.go:117] "RemoveContainer" containerID="cdf6e84105c17ac25025b254337c6e2e2e4bfc81ce6311231a2af9b83f72e8fc" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.809766 4871 scope.go:117] "RemoveContainer" containerID="7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f" Nov 26 06:32:25 crc kubenswrapper[4871]: E1126 06:32:25.810170 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f\": container with ID starting with 7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f not found: ID does not exist" containerID="7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.810198 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f"} err="failed to get container status 
\"7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f\": rpc error: code = NotFound desc = could not find container \"7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f\": container with ID starting with 7da668db569fb1782839e69d397473e231c09a19d753e6c5c4683c02107e350f not found: ID does not exist" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.810217 4871 scope.go:117] "RemoveContainer" containerID="543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49" Nov 26 06:32:25 crc kubenswrapper[4871]: E1126 06:32:25.810576 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49\": container with ID starting with 543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49 not found: ID does not exist" containerID="543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.810603 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49"} err="failed to get container status \"543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49\": rpc error: code = NotFound desc = could not find container \"543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49\": container with ID starting with 543a75b9f806355b9a8eb56b1ab30554e10c4b444f30ab36c085e51bc26d7a49 not found: ID does not exist" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.810622 4871 scope.go:117] "RemoveContainer" containerID="cdf6e84105c17ac25025b254337c6e2e2e4bfc81ce6311231a2af9b83f72e8fc" Nov 26 06:32:25 crc kubenswrapper[4871]: E1126 06:32:25.811470 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdf6e84105c17ac25025b254337c6e2e2e4bfc81ce6311231a2af9b83f72e8fc\": container with ID starting with cdf6e84105c17ac25025b254337c6e2e2e4bfc81ce6311231a2af9b83f72e8fc not found: ID does not exist" containerID="cdf6e84105c17ac25025b254337c6e2e2e4bfc81ce6311231a2af9b83f72e8fc" Nov 26 06:32:25 crc kubenswrapper[4871]: I1126 06:32:25.811492 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdf6e84105c17ac25025b254337c6e2e2e4bfc81ce6311231a2af9b83f72e8fc"} err="failed to get container status \"cdf6e84105c17ac25025b254337c6e2e2e4bfc81ce6311231a2af9b83f72e8fc\": rpc error: code = NotFound desc = could not find container \"cdf6e84105c17ac25025b254337c6e2e2e4bfc81ce6311231a2af9b83f72e8fc\": container with ID starting with cdf6e84105c17ac25025b254337c6e2e2e4bfc81ce6311231a2af9b83f72e8fc not found: ID does not exist" Nov 26 06:32:26 crc kubenswrapper[4871]: I1126 06:32:26.526068 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="423f0096-43c5-40ff-b858-da9192d3d136" path="/var/lib/kubelet/pods/423f0096-43c5-40ff-b858-da9192d3d136/volumes" Nov 26 06:32:27 crc kubenswrapper[4871]: I1126 06:32:27.312835 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:27 crc kubenswrapper[4871]: I1126 06:32:27.312889 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:27 crc kubenswrapper[4871]: I1126 06:32:27.379500 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:27 crc kubenswrapper[4871]: I1126 06:32:27.773659 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:28 crc kubenswrapper[4871]: I1126 06:32:28.961745 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4kmfq"] Nov 26 06:32:29 crc kubenswrapper[4871]: I1126 06:32:29.725065 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4kmfq" podUID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" containerName="registry-server" containerID="cri-o://dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c" gracePeriod=2 Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.251381 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.429390 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-utilities\") pod \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.429582 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbrkn\" (UniqueName: \"kubernetes.io/projected/ed46157d-a3f5-42ad-8a23-f2bc83295a78-kube-api-access-vbrkn\") pod \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.429698 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-catalog-content\") pod \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\" (UID: \"ed46157d-a3f5-42ad-8a23-f2bc83295a78\") " Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.430320 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-utilities" (OuterVolumeSpecName: "utilities") pod "ed46157d-a3f5-42ad-8a23-f2bc83295a78" (UID: "ed46157d-a3f5-42ad-8a23-f2bc83295a78"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.435981 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed46157d-a3f5-42ad-8a23-f2bc83295a78-kube-api-access-vbrkn" (OuterVolumeSpecName: "kube-api-access-vbrkn") pod "ed46157d-a3f5-42ad-8a23-f2bc83295a78" (UID: "ed46157d-a3f5-42ad-8a23-f2bc83295a78"). InnerVolumeSpecName "kube-api-access-vbrkn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.482918 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ed46157d-a3f5-42ad-8a23-f2bc83295a78" (UID: "ed46157d-a3f5-42ad-8a23-f2bc83295a78"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.531409 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.531440 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed46157d-a3f5-42ad-8a23-f2bc83295a78-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.531451 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbrkn\" (UniqueName: \"kubernetes.io/projected/ed46157d-a3f5-42ad-8a23-f2bc83295a78-kube-api-access-vbrkn\") on node \"crc\" DevicePath \"\"" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.745877 4871 generic.go:334] "Generic (PLEG): container finished" podID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" containerID="dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c" exitCode=0 Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.745962 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kmfq" event={"ID":"ed46157d-a3f5-42ad-8a23-f2bc83295a78","Type":"ContainerDied","Data":"dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c"} Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.745998 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4kmfq" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.746010 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4kmfq" event={"ID":"ed46157d-a3f5-42ad-8a23-f2bc83295a78","Type":"ContainerDied","Data":"a0373e9c1e56a12524ff96c12e8b71bededadd0ad815bd605915b01e0fbf37a8"} Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.746098 4871 scope.go:117] "RemoveContainer" containerID="dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.785682 4871 scope.go:117] "RemoveContainer" containerID="a8387fe51d18a6e4f9245fa88c6301ba75f1f66615b2ab9c94d4adbd9cb21e6c" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.787627 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4kmfq"] Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.798280 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4kmfq"] Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.815803 4871 scope.go:117] "RemoveContainer" containerID="a652961186a2e5162f723a7eb7f41a5b69796b80c7a2984f9d512e68e40c0c84" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.888758 4871 scope.go:117] "RemoveContainer" containerID="dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c" Nov 26 06:32:30 crc kubenswrapper[4871]: E1126 06:32:30.889580 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c\": container with ID starting with dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c not found: ID does not exist" containerID="dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.889620 
4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c"} err="failed to get container status \"dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c\": rpc error: code = NotFound desc = could not find container \"dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c\": container with ID starting with dd6acd55fb6f899fa3ee4432f6bbcdf8a02c931ccd2f2ed1513f32376b31d31c not found: ID does not exist" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.889646 4871 scope.go:117] "RemoveContainer" containerID="a8387fe51d18a6e4f9245fa88c6301ba75f1f66615b2ab9c94d4adbd9cb21e6c" Nov 26 06:32:30 crc kubenswrapper[4871]: E1126 06:32:30.890166 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8387fe51d18a6e4f9245fa88c6301ba75f1f66615b2ab9c94d4adbd9cb21e6c\": container with ID starting with a8387fe51d18a6e4f9245fa88c6301ba75f1f66615b2ab9c94d4adbd9cb21e6c not found: ID does not exist" containerID="a8387fe51d18a6e4f9245fa88c6301ba75f1f66615b2ab9c94d4adbd9cb21e6c" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.890237 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8387fe51d18a6e4f9245fa88c6301ba75f1f66615b2ab9c94d4adbd9cb21e6c"} err="failed to get container status \"a8387fe51d18a6e4f9245fa88c6301ba75f1f66615b2ab9c94d4adbd9cb21e6c\": rpc error: code = NotFound desc = could not find container \"a8387fe51d18a6e4f9245fa88c6301ba75f1f66615b2ab9c94d4adbd9cb21e6c\": container with ID starting with a8387fe51d18a6e4f9245fa88c6301ba75f1f66615b2ab9c94d4adbd9cb21e6c not found: ID does not exist" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.890282 4871 scope.go:117] "RemoveContainer" containerID="a652961186a2e5162f723a7eb7f41a5b69796b80c7a2984f9d512e68e40c0c84" Nov 26 06:32:30 crc kubenswrapper[4871]: E1126 06:32:30.890732 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a652961186a2e5162f723a7eb7f41a5b69796b80c7a2984f9d512e68e40c0c84\": container with ID starting with a652961186a2e5162f723a7eb7f41a5b69796b80c7a2984f9d512e68e40c0c84 not found: ID does not exist" containerID="a652961186a2e5162f723a7eb7f41a5b69796b80c7a2984f9d512e68e40c0c84" Nov 26 06:32:30 crc kubenswrapper[4871]: I1126 06:32:30.890765 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a652961186a2e5162f723a7eb7f41a5b69796b80c7a2984f9d512e68e40c0c84"} err="failed to get container status \"a652961186a2e5162f723a7eb7f41a5b69796b80c7a2984f9d512e68e40c0c84\": rpc error: code = NotFound desc = could not find container \"a652961186a2e5162f723a7eb7f41a5b69796b80c7a2984f9d512e68e40c0c84\": container with ID starting with a652961186a2e5162f723a7eb7f41a5b69796b80c7a2984f9d512e68e40c0c84 not found: ID does not exist" Nov 26 06:32:32 crc kubenswrapper[4871]: I1126 06:32:32.520918 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" path="/var/lib/kubelet/pods/ed46157d-a3f5-42ad-8a23-f2bc83295a78/volumes" Nov 26 06:33:53 crc kubenswrapper[4871]: I1126 06:33:53.614756 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:33:53 crc kubenswrapper[4871]: I1126 06:33:53.615492 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:34:23 crc kubenswrapper[4871]: I1126 06:34:23.614849 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:34:23 crc kubenswrapper[4871]: I1126 06:34:23.615459 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.546320 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pvf75"] Nov 26 06:34:39 crc kubenswrapper[4871]: E1126 06:34:39.547166 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="423f0096-43c5-40ff-b858-da9192d3d136" containerName="extract-utilities" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.547180 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="423f0096-43c5-40ff-b858-da9192d3d136" containerName="extract-utilities" Nov 26 06:34:39 crc kubenswrapper[4871]: E1126 06:34:39.547202 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" containerName="extract-utilities" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.547211 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" containerName="extract-utilities" Nov 26 06:34:39 crc kubenswrapper[4871]: E1126 06:34:39.547228 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="423f0096-43c5-40ff-b858-da9192d3d136" containerName="registry-server" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.547237 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="423f0096-43c5-40ff-b858-da9192d3d136" containerName="registry-server" Nov 26 06:34:39 crc kubenswrapper[4871]: E1126 06:34:39.547251 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" containerName="registry-server" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.547257 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" containerName="registry-server" Nov 26 06:34:39 crc kubenswrapper[4871]: E1126 06:34:39.547270 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" containerName="extract-content" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.547276 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" containerName="extract-content" Nov 26 06:34:39 crc kubenswrapper[4871]: E1126 06:34:39.547304 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="423f0096-43c5-40ff-b858-da9192d3d136" 
containerName="extract-content" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.547311 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="423f0096-43c5-40ff-b858-da9192d3d136" containerName="extract-content" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.547549 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="423f0096-43c5-40ff-b858-da9192d3d136" containerName="registry-server" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.547577 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed46157d-a3f5-42ad-8a23-f2bc83295a78" containerName="registry-server" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.549108 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.567142 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pvf75"] Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.592024 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-catalog-content\") pod \"redhat-operators-pvf75\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.592079 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7wxn\" (UniqueName: \"kubernetes.io/projected/6aa1d735-783e-464f-b3f2-4c73f77cc805-kube-api-access-v7wxn\") pod \"redhat-operators-pvf75\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.592297 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-utilities\") pod \"redhat-operators-pvf75\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.694320 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-utilities\") pod \"redhat-operators-pvf75\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.694453 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-catalog-content\") pod \"redhat-operators-pvf75\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.694482 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7wxn\" (UniqueName: \"kubernetes.io/projected/6aa1d735-783e-464f-b3f2-4c73f77cc805-kube-api-access-v7wxn\") pod \"redhat-operators-pvf75\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.694889 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-utilities\") pod \"redhat-operators-pvf75\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:39 crc kubenswrapper[4871]: I1126 06:34:39.695093 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-catalog-content\") pod \"redhat-operators-pvf75\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:40 crc kubenswrapper[4871]: I1126 06:34:40.258438 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7wxn\" (UniqueName: \"kubernetes.io/projected/6aa1d735-783e-464f-b3f2-4c73f77cc805-kube-api-access-v7wxn\") pod \"redhat-operators-pvf75\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:40 crc kubenswrapper[4871]: I1126 06:34:40.470076 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:41 crc kubenswrapper[4871]: I1126 06:34:41.019028 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pvf75"] Nov 26 06:34:41 crc kubenswrapper[4871]: I1126 06:34:41.260096 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvf75" event={"ID":"6aa1d735-783e-464f-b3f2-4c73f77cc805","Type":"ContainerStarted","Data":"9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1"} Nov 26 06:34:41 crc kubenswrapper[4871]: I1126 06:34:41.260146 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvf75" event={"ID":"6aa1d735-783e-464f-b3f2-4c73f77cc805","Type":"ContainerStarted","Data":"44ccc34c348b775d5d2a2147f6fe5e4df8e89b1374b25e8b8262df2d1d4221d1"} Nov 26 06:34:42 crc kubenswrapper[4871]: I1126 06:34:42.277972 4871 generic.go:334] "Generic (PLEG): container finished" podID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerID="9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1" exitCode=0 Nov 26 06:34:42 crc kubenswrapper[4871]: I1126 06:34:42.278023 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvf75" event={"ID":"6aa1d735-783e-464f-b3f2-4c73f77cc805","Type":"ContainerDied","Data":"9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1"} Nov 26 06:34:43 crc kubenswrapper[4871]: I1126 06:34:43.291165 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvf75" event={"ID":"6aa1d735-783e-464f-b3f2-4c73f77cc805","Type":"ContainerStarted","Data":"d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26"} Nov 26 06:34:46 crc kubenswrapper[4871]: I1126 06:34:46.334155 4871 generic.go:334] "Generic (PLEG): container finished" podID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerID="d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26" exitCode=0 Nov 26 06:34:46 crc kubenswrapper[4871]: I1126 06:34:46.334211 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvf75" event={"ID":"6aa1d735-783e-464f-b3f2-4c73f77cc805","Type":"ContainerDied","Data":"d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26"} Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.322578 4871 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jbrm2"] Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.325635 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.351117 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jbrm2"] Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.355156 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvf75" event={"ID":"6aa1d735-783e-464f-b3f2-4c73f77cc805","Type":"ContainerStarted","Data":"817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5"} Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.382511 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pvf75" podStartSLOduration=3.7415000000000003 podStartE2EDuration="8.382487005s" podCreationTimestamp="2025-11-26 06:34:39 +0000 UTC" firstStartedPulling="2025-11-26 06:34:42.281741511 +0000 UTC m=+4140.464793097" lastFinishedPulling="2025-11-26 06:34:46.922728516 +0000 UTC m=+4145.105780102" observedRunningTime="2025-11-26 06:34:47.379712337 +0000 UTC m=+4145.562763923" watchObservedRunningTime="2025-11-26 06:34:47.382487005 +0000 UTC m=+4145.565538641" Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.474610 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zphd\" (UniqueName: \"kubernetes.io/projected/771e97ca-9217-4ee6-a59c-87aeef828db8-kube-api-access-7zphd\") pod \"community-operators-jbrm2\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.474992 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-catalog-content\") pod \"community-operators-jbrm2\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.475075 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-utilities\") pod \"community-operators-jbrm2\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.577372 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-catalog-content\") pod \"community-operators-jbrm2\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.577511 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-utilities\") pod \"community-operators-jbrm2\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.577796 4871 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-7zphd\" (UniqueName: \"kubernetes.io/projected/771e97ca-9217-4ee6-a59c-87aeef828db8-kube-api-access-7zphd\") pod \"community-operators-jbrm2\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.578043 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-catalog-content\") pod \"community-operators-jbrm2\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.578131 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-utilities\") pod \"community-operators-jbrm2\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.609302 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zphd\" (UniqueName: \"kubernetes.io/projected/771e97ca-9217-4ee6-a59c-87aeef828db8-kube-api-access-7zphd\") pod \"community-operators-jbrm2\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:47 crc kubenswrapper[4871]: I1126 06:34:47.661358 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:48 crc kubenswrapper[4871]: I1126 06:34:48.255287 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jbrm2"] Nov 26 06:34:48 crc kubenswrapper[4871]: I1126 06:34:48.371126 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jbrm2" event={"ID":"771e97ca-9217-4ee6-a59c-87aeef828db8","Type":"ContainerStarted","Data":"a916ec90f57dcb6169410d263577e93a0065a02e785686ab3364689946bb65ad"} Nov 26 06:34:49 crc kubenswrapper[4871]: I1126 06:34:49.383672 4871 generic.go:334] "Generic (PLEG): container finished" podID="771e97ca-9217-4ee6-a59c-87aeef828db8" containerID="d0367d1f5046bb158d018bc2a721ff4c8c346fe4dd33c874796a9e205384c74f" exitCode=0 Nov 26 06:34:49 crc kubenswrapper[4871]: I1126 06:34:49.383725 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jbrm2" event={"ID":"771e97ca-9217-4ee6-a59c-87aeef828db8","Type":"ContainerDied","Data":"d0367d1f5046bb158d018bc2a721ff4c8c346fe4dd33c874796a9e205384c74f"} Nov 26 06:34:50 crc kubenswrapper[4871]: I1126 06:34:50.395792 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jbrm2" event={"ID":"771e97ca-9217-4ee6-a59c-87aeef828db8","Type":"ContainerStarted","Data":"ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed"} Nov 26 06:34:50 crc kubenswrapper[4871]: I1126 06:34:50.471159 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:50 crc kubenswrapper[4871]: I1126 06:34:50.471219 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:34:51 crc kubenswrapper[4871]: I1126 06:34:51.533705 4871 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-marketplace/redhat-operators-pvf75" podUID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerName="registry-server" probeResult="failure" output=< Nov 26 06:34:51 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s Nov 26 06:34:51 crc kubenswrapper[4871]: > Nov 26 06:34:52 crc kubenswrapper[4871]: I1126 06:34:52.418274 4871 generic.go:334] "Generic (PLEG): container finished" podID="771e97ca-9217-4ee6-a59c-87aeef828db8" containerID="ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed" exitCode=0 Nov 26 06:34:52 crc kubenswrapper[4871]: I1126 06:34:52.418317 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jbrm2" event={"ID":"771e97ca-9217-4ee6-a59c-87aeef828db8","Type":"ContainerDied","Data":"ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed"} Nov 26 06:34:53 crc kubenswrapper[4871]: I1126 06:34:53.433843 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jbrm2" event={"ID":"771e97ca-9217-4ee6-a59c-87aeef828db8","Type":"ContainerStarted","Data":"e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5"} Nov 26 06:34:53 crc kubenswrapper[4871]: I1126 06:34:53.454457 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jbrm2" podStartSLOduration=3.001514426 podStartE2EDuration="6.454441237s" podCreationTimestamp="2025-11-26 06:34:47 +0000 UTC" firstStartedPulling="2025-11-26 06:34:49.386083874 +0000 UTC m=+4147.569135460" lastFinishedPulling="2025-11-26 06:34:52.839010675 +0000 UTC m=+4151.022062271" observedRunningTime="2025-11-26 06:34:53.447788482 +0000 UTC m=+4151.630840088" watchObservedRunningTime="2025-11-26 06:34:53.454441237 +0000 UTC m=+4151.637492823" Nov 26 06:34:53 crc kubenswrapper[4871]: I1126 06:34:53.615360 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:34:53 crc kubenswrapper[4871]: I1126 06:34:53.615702 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:34:53 crc kubenswrapper[4871]: I1126 06:34:53.615761 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 06:34:53 crc kubenswrapper[4871]: I1126 06:34:53.616480 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"82a89614896222fd0545f77def74dd81122d48e02666ae4d8a09ea6872bbf47d"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 06:34:53 crc kubenswrapper[4871]: I1126 06:34:53.616591 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" 
containerID="cri-o://82a89614896222fd0545f77def74dd81122d48e02666ae4d8a09ea6872bbf47d" gracePeriod=600 Nov 26 06:34:54 crc kubenswrapper[4871]: I1126 06:34:54.447770 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="82a89614896222fd0545f77def74dd81122d48e02666ae4d8a09ea6872bbf47d" exitCode=0 Nov 26 06:34:54 crc kubenswrapper[4871]: I1126 06:34:54.448918 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"82a89614896222fd0545f77def74dd81122d48e02666ae4d8a09ea6872bbf47d"} Nov 26 06:34:54 crc kubenswrapper[4871]: I1126 06:34:54.449004 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd"} Nov 26 06:34:54 crc kubenswrapper[4871]: I1126 06:34:54.449032 4871 scope.go:117] "RemoveContainer" containerID="b64bf8e03a50854524c411bfd0b9a0e6fdc9e72eb4fcaf05b1589f8dc467e5e2" Nov 26 06:34:57 crc kubenswrapper[4871]: I1126 06:34:57.662088 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:57 crc kubenswrapper[4871]: I1126 06:34:57.662491 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:57 crc kubenswrapper[4871]: I1126 06:34:57.717579 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:58 crc kubenswrapper[4871]: I1126 06:34:58.579976 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:34:58 crc kubenswrapper[4871]: I1126 06:34:58.654296 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jbrm2"] Nov 26 06:35:00 crc kubenswrapper[4871]: I1126 06:35:00.518194 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jbrm2" podUID="771e97ca-9217-4ee6-a59c-87aeef828db8" containerName="registry-server" containerID="cri-o://e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5" gracePeriod=2 Nov 26 06:35:00 crc kubenswrapper[4871]: I1126 06:35:00.548146 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:35:00 crc kubenswrapper[4871]: I1126 06:35:00.606677 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.027055 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.160963 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-utilities\") pod \"771e97ca-9217-4ee6-a59c-87aeef828db8\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.161292 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zphd\" (UniqueName: \"kubernetes.io/projected/771e97ca-9217-4ee6-a59c-87aeef828db8-kube-api-access-7zphd\") pod \"771e97ca-9217-4ee6-a59c-87aeef828db8\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.161384 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-catalog-content\") pod \"771e97ca-9217-4ee6-a59c-87aeef828db8\" (UID: \"771e97ca-9217-4ee6-a59c-87aeef828db8\") " Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.162254 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-utilities" (OuterVolumeSpecName: "utilities") pod "771e97ca-9217-4ee6-a59c-87aeef828db8" (UID: "771e97ca-9217-4ee6-a59c-87aeef828db8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.167399 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/771e97ca-9217-4ee6-a59c-87aeef828db8-kube-api-access-7zphd" (OuterVolumeSpecName: "kube-api-access-7zphd") pod "771e97ca-9217-4ee6-a59c-87aeef828db8" (UID: "771e97ca-9217-4ee6-a59c-87aeef828db8"). InnerVolumeSpecName "kube-api-access-7zphd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.223078 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "771e97ca-9217-4ee6-a59c-87aeef828db8" (UID: "771e97ca-9217-4ee6-a59c-87aeef828db8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.263378 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zphd\" (UniqueName: \"kubernetes.io/projected/771e97ca-9217-4ee6-a59c-87aeef828db8-kube-api-access-7zphd\") on node \"crc\" DevicePath \"\"" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.263407 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.263416 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/771e97ca-9217-4ee6-a59c-87aeef828db8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.529621 4871 generic.go:334] "Generic (PLEG): container finished" podID="771e97ca-9217-4ee6-a59c-87aeef828db8" containerID="e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5" exitCode=0 Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.529670 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jbrm2" event={"ID":"771e97ca-9217-4ee6-a59c-87aeef828db8","Type":"ContainerDied","Data":"e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5"} Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.529710 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jbrm2" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.529737 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jbrm2" event={"ID":"771e97ca-9217-4ee6-a59c-87aeef828db8","Type":"ContainerDied","Data":"a916ec90f57dcb6169410d263577e93a0065a02e785686ab3364689946bb65ad"} Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.529766 4871 scope.go:117] "RemoveContainer" containerID="e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.568465 4871 scope.go:117] "RemoveContainer" containerID="ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.579784 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pvf75"] Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.600515 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jbrm2"] Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.615778 4871 scope.go:117] "RemoveContainer" containerID="d0367d1f5046bb158d018bc2a721ff4c8c346fe4dd33c874796a9e205384c74f" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.621062 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jbrm2"] Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.659541 4871 scope.go:117] "RemoveContainer" containerID="e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5" Nov 26 06:35:01 crc kubenswrapper[4871]: E1126 06:35:01.660065 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5\": container with ID starting with e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5 not 
found: ID does not exist" containerID="e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.660116 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5"} err="failed to get container status \"e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5\": rpc error: code = NotFound desc = could not find container \"e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5\": container with ID starting with e9129d7995c5c9ce35d687933c5cb3b937c5a1814990aed3f8c2d06cdb15a0e5 not found: ID does not exist" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.660147 4871 scope.go:117] "RemoveContainer" containerID="ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed" Nov 26 06:35:01 crc kubenswrapper[4871]: E1126 06:35:01.660493 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed\": container with ID starting with ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed not found: ID does not exist" containerID="ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.660559 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed"} err="failed to get container status \"ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed\": rpc error: code = NotFound desc = could not find container \"ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed\": container with ID starting with ba6783891e0e85ad9379e8e5661b7a65663472ef666882c46071427574f5c1ed not found: ID does not exist" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.660582 4871 scope.go:117] "RemoveContainer" containerID="d0367d1f5046bb158d018bc2a721ff4c8c346fe4dd33c874796a9e205384c74f" Nov 26 06:35:01 crc kubenswrapper[4871]: E1126 06:35:01.660888 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0367d1f5046bb158d018bc2a721ff4c8c346fe4dd33c874796a9e205384c74f\": container with ID starting with d0367d1f5046bb158d018bc2a721ff4c8c346fe4dd33c874796a9e205384c74f not found: ID does not exist" containerID="d0367d1f5046bb158d018bc2a721ff4c8c346fe4dd33c874796a9e205384c74f" Nov 26 06:35:01 crc kubenswrapper[4871]: I1126 06:35:01.660927 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0367d1f5046bb158d018bc2a721ff4c8c346fe4dd33c874796a9e205384c74f"} err="failed to get container status \"d0367d1f5046bb158d018bc2a721ff4c8c346fe4dd33c874796a9e205384c74f\": rpc error: code = NotFound desc = could not find container \"d0367d1f5046bb158d018bc2a721ff4c8c346fe4dd33c874796a9e205384c74f\": container with ID starting with d0367d1f5046bb158d018bc2a721ff4c8c346fe4dd33c874796a9e205384c74f not found: ID does not exist" Nov 26 06:35:02 crc kubenswrapper[4871]: I1126 06:35:02.529607 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="771e97ca-9217-4ee6-a59c-87aeef828db8" path="/var/lib/kubelet/pods/771e97ca-9217-4ee6-a59c-87aeef828db8/volumes" Nov 26 06:35:02 crc kubenswrapper[4871]: I1126 06:35:02.543446 4871 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-marketplace/redhat-operators-pvf75" podUID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerName="registry-server" containerID="cri-o://817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5" gracePeriod=2 Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.042620 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.209789 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-catalog-content\") pod \"6aa1d735-783e-464f-b3f2-4c73f77cc805\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.209989 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7wxn\" (UniqueName: \"kubernetes.io/projected/6aa1d735-783e-464f-b3f2-4c73f77cc805-kube-api-access-v7wxn\") pod \"6aa1d735-783e-464f-b3f2-4c73f77cc805\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.210229 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-utilities\") pod \"6aa1d735-783e-464f-b3f2-4c73f77cc805\" (UID: \"6aa1d735-783e-464f-b3f2-4c73f77cc805\") " Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.212063 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-utilities" (OuterVolumeSpecName: "utilities") pod "6aa1d735-783e-464f-b3f2-4c73f77cc805" (UID: "6aa1d735-783e-464f-b3f2-4c73f77cc805"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.215494 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6aa1d735-783e-464f-b3f2-4c73f77cc805-kube-api-access-v7wxn" (OuterVolumeSpecName: "kube-api-access-v7wxn") pod "6aa1d735-783e-464f-b3f2-4c73f77cc805" (UID: "6aa1d735-783e-464f-b3f2-4c73f77cc805"). InnerVolumeSpecName "kube-api-access-v7wxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.313603 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.313658 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7wxn\" (UniqueName: \"kubernetes.io/projected/6aa1d735-783e-464f-b3f2-4c73f77cc805-kube-api-access-v7wxn\") on node \"crc\" DevicePath \"\"" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.336254 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6aa1d735-783e-464f-b3f2-4c73f77cc805" (UID: "6aa1d735-783e-464f-b3f2-4c73f77cc805"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.415811 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aa1d735-783e-464f-b3f2-4c73f77cc805-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.557709 4871 generic.go:334] "Generic (PLEG): container finished" podID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerID="817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5" exitCode=0 Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.557751 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvf75" event={"ID":"6aa1d735-783e-464f-b3f2-4c73f77cc805","Type":"ContainerDied","Data":"817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5"} Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.557777 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvf75" event={"ID":"6aa1d735-783e-464f-b3f2-4c73f77cc805","Type":"ContainerDied","Data":"44ccc34c348b775d5d2a2147f6fe5e4df8e89b1374b25e8b8262df2d1d4221d1"} Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.557794 4871 scope.go:117] "RemoveContainer" containerID="817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.557918 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvf75" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.595132 4871 scope.go:117] "RemoveContainer" containerID="d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.601784 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pvf75"] Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.611793 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pvf75"] Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.636131 4871 scope.go:117] "RemoveContainer" containerID="9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.698512 4871 scope.go:117] "RemoveContainer" containerID="817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5" Nov 26 06:35:03 crc kubenswrapper[4871]: E1126 06:35:03.699255 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5\": container with ID starting with 817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5 not found: ID does not exist" containerID="817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.699294 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5"} err="failed to get container status \"817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5\": rpc error: code = NotFound desc = could not find container \"817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5\": container with ID starting with 817ac951666e7b370808716c00472eec73245f9368e1d87860664690a9c011a5 not found: ID does not exist" Nov 26 06:35:03 crc 
kubenswrapper[4871]: I1126 06:35:03.699317 4871 scope.go:117] "RemoveContainer" containerID="d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26" Nov 26 06:35:03 crc kubenswrapper[4871]: E1126 06:35:03.701462 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26\": container with ID starting with d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26 not found: ID does not exist" containerID="d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.701577 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26"} err="failed to get container status \"d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26\": rpc error: code = NotFound desc = could not find container \"d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26\": container with ID starting with d381d4cc7a0869e0a009e81365708c06b91104e894c5b053d4626759fbc83a26 not found: ID does not exist" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.701631 4871 scope.go:117] "RemoveContainer" containerID="9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1" Nov 26 06:35:03 crc kubenswrapper[4871]: E1126 06:35:03.702119 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1\": container with ID starting with 9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1 not found: ID does not exist" containerID="9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1" Nov 26 06:35:03 crc kubenswrapper[4871]: I1126 06:35:03.702144 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1"} err="failed to get container status \"9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1\": rpc error: code = NotFound desc = could not find container \"9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1\": container with ID starting with 9a9fb6dc7a58bbe2aaf85386cdae06ba3289ef45a7be9343d010a1dfeee9f2e1 not found: ID does not exist" Nov 26 06:35:04 crc kubenswrapper[4871]: I1126 06:35:04.520005 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6aa1d735-783e-464f-b3f2-4c73f77cc805" path="/var/lib/kubelet/pods/6aa1d735-783e-464f-b3f2-4c73f77cc805/volumes" Nov 26 06:35:36 crc kubenswrapper[4871]: I1126 06:35:36.761740 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="1a6ce456-795f-4bf1-bab9-f5de7cfd7abe" containerName="galera" probeResult="failure" output="command timed out" Nov 26 06:35:36 crc kubenswrapper[4871]: I1126 06:35:36.761761 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="1a6ce456-795f-4bf1-bab9-f5de7cfd7abe" containerName="galera" probeResult="failure" output="command timed out" Nov 26 06:36:53 crc kubenswrapper[4871]: I1126 06:36:53.615218 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:36:53 crc kubenswrapper[4871]: I1126 06:36:53.615838 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:37:23 crc kubenswrapper[4871]: I1126 06:37:23.615544 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:37:23 crc kubenswrapper[4871]: I1126 06:37:23.616050 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:37:38 crc kubenswrapper[4871]: E1126 06:37:38.074076 4871 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.44:39596->38.102.83.44:38809: write tcp 38.102.83.44:39596->38.102.83.44:38809: write: broken pipe Nov 26 06:37:53 crc kubenswrapper[4871]: I1126 06:37:53.615023 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:37:53 crc kubenswrapper[4871]: I1126 06:37:53.615784 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:37:53 crc kubenswrapper[4871]: I1126 06:37:53.615867 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 06:37:53 crc kubenswrapper[4871]: I1126 06:37:53.617042 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 06:37:53 crc kubenswrapper[4871]: I1126 06:37:53.617150 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" gracePeriod=600 Nov 26 06:37:53 crc kubenswrapper[4871]: E1126 06:37:53.752990 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:37:54 crc kubenswrapper[4871]: I1126 06:37:54.708245 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" exitCode=0 Nov 26 06:37:54 crc kubenswrapper[4871]: I1126 06:37:54.708309 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd"} Nov 26 06:37:54 crc kubenswrapper[4871]: I1126 06:37:54.708681 4871 scope.go:117] "RemoveContainer" containerID="82a89614896222fd0545f77def74dd81122d48e02666ae4d8a09ea6872bbf47d" Nov 26 06:37:54 crc kubenswrapper[4871]: I1126 06:37:54.709664 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:37:54 crc kubenswrapper[4871]: E1126 06:37:54.710149 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:38:08 crc kubenswrapper[4871]: I1126 06:38:08.508017 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:38:08 crc kubenswrapper[4871]: E1126 06:38:08.510569 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:38:23 crc kubenswrapper[4871]: I1126 06:38:23.507196 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:38:23 crc kubenswrapper[4871]: E1126 06:38:23.508003 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:38:35 crc kubenswrapper[4871]: I1126 06:38:35.507247 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:38:35 crc kubenswrapper[4871]: E1126 06:38:35.508079 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:38:48 crc kubenswrapper[4871]: I1126 06:38:48.507285 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:38:48 crc kubenswrapper[4871]: E1126 06:38:48.508030 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:39:00 crc kubenswrapper[4871]: I1126 06:39:00.508032 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:39:00 crc kubenswrapper[4871]: E1126 06:39:00.508952 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:39:12 crc kubenswrapper[4871]: I1126 06:39:12.515625 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:39:12 crc kubenswrapper[4871]: E1126 06:39:12.516347 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:39:24 crc kubenswrapper[4871]: I1126 06:39:24.507212 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:39:24 crc kubenswrapper[4871]: E1126 06:39:24.508162 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:39:39 crc kubenswrapper[4871]: I1126 06:39:39.508007 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:39:39 crc kubenswrapper[4871]: E1126 06:39:39.509097 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" 
podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:39:54 crc kubenswrapper[4871]: I1126 06:39:54.508813 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:39:54 crc kubenswrapper[4871]: E1126 06:39:54.509664 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:40:07 crc kubenswrapper[4871]: I1126 06:40:07.507388 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:40:07 crc kubenswrapper[4871]: E1126 06:40:07.508127 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:40:18 crc kubenswrapper[4871]: I1126 06:40:18.507520 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:40:18 crc kubenswrapper[4871]: E1126 06:40:18.509664 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:40:32 crc kubenswrapper[4871]: I1126 06:40:32.521026 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:40:32 crc kubenswrapper[4871]: E1126 06:40:32.521916 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:40:46 crc kubenswrapper[4871]: I1126 06:40:46.507301 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:40:46 crc kubenswrapper[4871]: E1126 06:40:46.508321 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:41:01 crc kubenswrapper[4871]: I1126 06:41:01.507902 4871 scope.go:117] "RemoveContainer" 
containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:41:01 crc kubenswrapper[4871]: E1126 06:41:01.508931 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:41:14 crc kubenswrapper[4871]: I1126 06:41:14.507257 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:41:14 crc kubenswrapper[4871]: E1126 06:41:14.508125 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:41:27 crc kubenswrapper[4871]: I1126 06:41:27.507785 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:41:27 crc kubenswrapper[4871]: E1126 06:41:27.508881 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:41:39 crc kubenswrapper[4871]: I1126 06:41:39.508213 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:41:39 crc kubenswrapper[4871]: E1126 06:41:39.509071 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:41:52 crc kubenswrapper[4871]: I1126 06:41:52.515944 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:41:52 crc kubenswrapper[4871]: E1126 06:41:52.516768 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:42:04 crc kubenswrapper[4871]: I1126 06:42:04.507237 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:42:04 crc kubenswrapper[4871]: E1126 06:42:04.508238 4871 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:42:16 crc kubenswrapper[4871]: I1126 06:42:16.507254 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:42:16 crc kubenswrapper[4871]: E1126 06:42:16.508212 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:42:26 crc kubenswrapper[4871]: I1126 06:42:26.760948 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="1a6ce456-795f-4bf1-bab9-f5de7cfd7abe" containerName="galera" probeResult="failure" output="command timed out" Nov 26 06:42:26 crc kubenswrapper[4871]: I1126 06:42:26.761029 4871 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="1a6ce456-795f-4bf1-bab9-f5de7cfd7abe" containerName="galera" probeResult="failure" output="command timed out" Nov 26 06:42:28 crc kubenswrapper[4871]: I1126 06:42:28.507018 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:42:28 crc kubenswrapper[4871]: E1126 06:42:28.507645 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.494988 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6cnzt"] Nov 26 06:42:40 crc kubenswrapper[4871]: E1126 06:42:40.496010 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerName="extract-utilities" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.496027 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerName="extract-utilities" Nov 26 06:42:40 crc kubenswrapper[4871]: E1126 06:42:40.496053 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerName="extract-content" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.496063 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerName="extract-content" Nov 26 06:42:40 crc kubenswrapper[4871]: E1126 06:42:40.496086 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="771e97ca-9217-4ee6-a59c-87aeef828db8" containerName="extract-utilities" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.496093 4871 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="771e97ca-9217-4ee6-a59c-87aeef828db8" containerName="extract-utilities" Nov 26 06:42:40 crc kubenswrapper[4871]: E1126 06:42:40.496104 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="771e97ca-9217-4ee6-a59c-87aeef828db8" containerName="extract-content" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.496111 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="771e97ca-9217-4ee6-a59c-87aeef828db8" containerName="extract-content" Nov 26 06:42:40 crc kubenswrapper[4871]: E1126 06:42:40.496135 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerName="registry-server" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.496142 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerName="registry-server" Nov 26 06:42:40 crc kubenswrapper[4871]: E1126 06:42:40.496152 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="771e97ca-9217-4ee6-a59c-87aeef828db8" containerName="registry-server" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.496159 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="771e97ca-9217-4ee6-a59c-87aeef828db8" containerName="registry-server" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.496369 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6aa1d735-783e-464f-b3f2-4c73f77cc805" containerName="registry-server" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.496402 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="771e97ca-9217-4ee6-a59c-87aeef828db8" containerName="registry-server" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.498149 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.522425 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6cnzt"] Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.612679 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-utilities\") pod \"certified-operators-6cnzt\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.613123 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-catalog-content\") pod \"certified-operators-6cnzt\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.613300 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfp5s\" (UniqueName: \"kubernetes.io/projected/743fc7b6-f220-4d3d-8391-0b07fe40b607-kube-api-access-tfp5s\") pod \"certified-operators-6cnzt\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.715130 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-catalog-content\") pod \"certified-operators-6cnzt\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.715424 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfp5s\" (UniqueName: \"kubernetes.io/projected/743fc7b6-f220-4d3d-8391-0b07fe40b607-kube-api-access-tfp5s\") pod \"certified-operators-6cnzt\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.715618 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-utilities\") pod \"certified-operators-6cnzt\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.715967 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-catalog-content\") pod \"certified-operators-6cnzt\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.716018 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-utilities\") pod \"certified-operators-6cnzt\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.748333 4871 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tfp5s\" (UniqueName: \"kubernetes.io/projected/743fc7b6-f220-4d3d-8391-0b07fe40b607-kube-api-access-tfp5s\") pod \"certified-operators-6cnzt\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:40 crc kubenswrapper[4871]: I1126 06:42:40.818701 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:41 crc kubenswrapper[4871]: I1126 06:42:41.457167 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6cnzt"] Nov 26 06:42:42 crc kubenswrapper[4871]: I1126 06:42:42.012020 4871 generic.go:334] "Generic (PLEG): container finished" podID="743fc7b6-f220-4d3d-8391-0b07fe40b607" containerID="d959d2d2488bb7664a2efa1b5989baadfb95ea048a5d9ea5cb79474ba972f3a4" exitCode=0 Nov 26 06:42:42 crc kubenswrapper[4871]: I1126 06:42:42.012138 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6cnzt" event={"ID":"743fc7b6-f220-4d3d-8391-0b07fe40b607","Type":"ContainerDied","Data":"d959d2d2488bb7664a2efa1b5989baadfb95ea048a5d9ea5cb79474ba972f3a4"} Nov 26 06:42:42 crc kubenswrapper[4871]: I1126 06:42:42.012403 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6cnzt" event={"ID":"743fc7b6-f220-4d3d-8391-0b07fe40b607","Type":"ContainerStarted","Data":"32dbab9d67580dfa646bc9e5c678b6b38f0b0436bfbccfe784f5836fe8391263"} Nov 26 06:42:42 crc kubenswrapper[4871]: I1126 06:42:42.014730 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 06:42:42 crc kubenswrapper[4871]: I1126 06:42:42.513267 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:42:42 crc kubenswrapper[4871]: E1126 06:42:42.513549 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:42:43 crc kubenswrapper[4871]: I1126 06:42:43.025331 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6cnzt" event={"ID":"743fc7b6-f220-4d3d-8391-0b07fe40b607","Type":"ContainerStarted","Data":"a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04"} Nov 26 06:42:44 crc kubenswrapper[4871]: I1126 06:42:44.038624 4871 generic.go:334] "Generic (PLEG): container finished" podID="743fc7b6-f220-4d3d-8391-0b07fe40b607" containerID="a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04" exitCode=0 Nov 26 06:42:44 crc kubenswrapper[4871]: I1126 06:42:44.038734 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6cnzt" event={"ID":"743fc7b6-f220-4d3d-8391-0b07fe40b607","Type":"ContainerDied","Data":"a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04"} Nov 26 06:42:45 crc kubenswrapper[4871]: I1126 06:42:45.049746 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6cnzt" 
event={"ID":"743fc7b6-f220-4d3d-8391-0b07fe40b607","Type":"ContainerStarted","Data":"62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0"} Nov 26 06:42:45 crc kubenswrapper[4871]: I1126 06:42:45.069915 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6cnzt" podStartSLOduration=2.595593634 podStartE2EDuration="5.069894924s" podCreationTimestamp="2025-11-26 06:42:40 +0000 UTC" firstStartedPulling="2025-11-26 06:42:42.014480384 +0000 UTC m=+4620.197531970" lastFinishedPulling="2025-11-26 06:42:44.488781674 +0000 UTC m=+4622.671833260" observedRunningTime="2025-11-26 06:42:45.068370477 +0000 UTC m=+4623.251422073" watchObservedRunningTime="2025-11-26 06:42:45.069894924 +0000 UTC m=+4623.252946510" Nov 26 06:42:50 crc kubenswrapper[4871]: I1126 06:42:50.819401 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:50 crc kubenswrapper[4871]: I1126 06:42:50.820212 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:50 crc kubenswrapper[4871]: I1126 06:42:50.886142 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:51 crc kubenswrapper[4871]: I1126 06:42:51.173280 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:51 crc kubenswrapper[4871]: I1126 06:42:51.223181 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6cnzt"] Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.143138 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6cnzt" podUID="743fc7b6-f220-4d3d-8391-0b07fe40b607" containerName="registry-server" containerID="cri-o://62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0" gracePeriod=2 Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.509563 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:42:53 crc kubenswrapper[4871]: E1126 06:42:53.509860 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.654549 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.798869 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-utilities\") pod \"743fc7b6-f220-4d3d-8391-0b07fe40b607\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.798917 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-catalog-content\") pod \"743fc7b6-f220-4d3d-8391-0b07fe40b607\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.799120 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfp5s\" (UniqueName: \"kubernetes.io/projected/743fc7b6-f220-4d3d-8391-0b07fe40b607-kube-api-access-tfp5s\") pod \"743fc7b6-f220-4d3d-8391-0b07fe40b607\" (UID: \"743fc7b6-f220-4d3d-8391-0b07fe40b607\") " Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.807184 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-utilities" (OuterVolumeSpecName: "utilities") pod "743fc7b6-f220-4d3d-8391-0b07fe40b607" (UID: "743fc7b6-f220-4d3d-8391-0b07fe40b607"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.808905 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/743fc7b6-f220-4d3d-8391-0b07fe40b607-kube-api-access-tfp5s" (OuterVolumeSpecName: "kube-api-access-tfp5s") pod "743fc7b6-f220-4d3d-8391-0b07fe40b607" (UID: "743fc7b6-f220-4d3d-8391-0b07fe40b607"). InnerVolumeSpecName "kube-api-access-tfp5s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.866663 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "743fc7b6-f220-4d3d-8391-0b07fe40b607" (UID: "743fc7b6-f220-4d3d-8391-0b07fe40b607"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.901357 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.901405 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/743fc7b6-f220-4d3d-8391-0b07fe40b607-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:42:53 crc kubenswrapper[4871]: I1126 06:42:53.901420 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfp5s\" (UniqueName: \"kubernetes.io/projected/743fc7b6-f220-4d3d-8391-0b07fe40b607-kube-api-access-tfp5s\") on node \"crc\" DevicePath \"\"" Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.152726 4871 generic.go:334] "Generic (PLEG): container finished" podID="743fc7b6-f220-4d3d-8391-0b07fe40b607" containerID="62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0" exitCode=0 Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.152775 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6cnzt" event={"ID":"743fc7b6-f220-4d3d-8391-0b07fe40b607","Type":"ContainerDied","Data":"62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0"} Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.152805 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6cnzt" event={"ID":"743fc7b6-f220-4d3d-8391-0b07fe40b607","Type":"ContainerDied","Data":"32dbab9d67580dfa646bc9e5c678b6b38f0b0436bfbccfe784f5836fe8391263"} Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.152813 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6cnzt" Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.152826 4871 scope.go:117] "RemoveContainer" containerID="62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0" Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.189775 4871 scope.go:117] "RemoveContainer" containerID="a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04" Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.199271 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6cnzt"] Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.215318 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6cnzt"] Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.215502 4871 scope.go:117] "RemoveContainer" containerID="d959d2d2488bb7664a2efa1b5989baadfb95ea048a5d9ea5cb79474ba972f3a4" Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.284517 4871 scope.go:117] "RemoveContainer" containerID="62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0" Nov 26 06:42:54 crc kubenswrapper[4871]: E1126 06:42:54.286913 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0\": container with ID starting with 62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0 not found: ID does not exist" containerID="62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0" Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.287001 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0"} err="failed to get container status \"62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0\": rpc error: code = NotFound desc = could not find container \"62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0\": container with ID starting with 62c6fb84c41f81defcfb9a4257af781527ffe5471bb718d4c8aa1aa12cd67cc0 not found: ID does not exist" Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.287053 4871 scope.go:117] "RemoveContainer" containerID="a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04" Nov 26 06:42:54 crc kubenswrapper[4871]: E1126 06:42:54.287497 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04\": container with ID starting with a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04 not found: ID does not exist" containerID="a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04" Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.287596 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04"} err="failed to get container status \"a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04\": rpc error: code = NotFound desc = could not find container \"a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04\": container with ID starting with a0c106c52371d8d4d6ee6598c6600dc1c73fe8761a843e810ef1da85fad8be04 not found: ID does not exist" Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.287639 4871 scope.go:117] "RemoveContainer" 
containerID="d959d2d2488bb7664a2efa1b5989baadfb95ea048a5d9ea5cb79474ba972f3a4" Nov 26 06:42:54 crc kubenswrapper[4871]: E1126 06:42:54.288176 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d959d2d2488bb7664a2efa1b5989baadfb95ea048a5d9ea5cb79474ba972f3a4\": container with ID starting with d959d2d2488bb7664a2efa1b5989baadfb95ea048a5d9ea5cb79474ba972f3a4 not found: ID does not exist" containerID="d959d2d2488bb7664a2efa1b5989baadfb95ea048a5d9ea5cb79474ba972f3a4" Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.288215 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d959d2d2488bb7664a2efa1b5989baadfb95ea048a5d9ea5cb79474ba972f3a4"} err="failed to get container status \"d959d2d2488bb7664a2efa1b5989baadfb95ea048a5d9ea5cb79474ba972f3a4\": rpc error: code = NotFound desc = could not find container \"d959d2d2488bb7664a2efa1b5989baadfb95ea048a5d9ea5cb79474ba972f3a4\": container with ID starting with d959d2d2488bb7664a2efa1b5989baadfb95ea048a5d9ea5cb79474ba972f3a4 not found: ID does not exist" Nov 26 06:42:54 crc kubenswrapper[4871]: I1126 06:42:54.525476 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="743fc7b6-f220-4d3d-8391-0b07fe40b607" path="/var/lib/kubelet/pods/743fc7b6-f220-4d3d-8391-0b07fe40b607/volumes" Nov 26 06:43:04 crc kubenswrapper[4871]: I1126 06:43:04.507966 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:43:05 crc kubenswrapper[4871]: I1126 06:43:05.277972 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"9c988cadcd1c7db807d7f1faa554de6ce79af88aeccc2c41477f1c97e057816d"} Nov 26 06:43:13 crc kubenswrapper[4871]: I1126 06:43:13.873857 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-g9xcc"] Nov 26 06:43:13 crc kubenswrapper[4871]: E1126 06:43:13.875230 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="743fc7b6-f220-4d3d-8391-0b07fe40b607" containerName="extract-utilities" Nov 26 06:43:13 crc kubenswrapper[4871]: I1126 06:43:13.875251 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="743fc7b6-f220-4d3d-8391-0b07fe40b607" containerName="extract-utilities" Nov 26 06:43:13 crc kubenswrapper[4871]: E1126 06:43:13.875328 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="743fc7b6-f220-4d3d-8391-0b07fe40b607" containerName="registry-server" Nov 26 06:43:13 crc kubenswrapper[4871]: I1126 06:43:13.875340 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="743fc7b6-f220-4d3d-8391-0b07fe40b607" containerName="registry-server" Nov 26 06:43:13 crc kubenswrapper[4871]: E1126 06:43:13.875369 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="743fc7b6-f220-4d3d-8391-0b07fe40b607" containerName="extract-content" Nov 26 06:43:13 crc kubenswrapper[4871]: I1126 06:43:13.875380 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="743fc7b6-f220-4d3d-8391-0b07fe40b607" containerName="extract-content" Nov 26 06:43:13 crc kubenswrapper[4871]: I1126 06:43:13.875716 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="743fc7b6-f220-4d3d-8391-0b07fe40b607" containerName="registry-server" Nov 26 06:43:13 crc kubenswrapper[4871]: I1126 06:43:13.877815 4871 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:13 crc kubenswrapper[4871]: I1126 06:43:13.887656 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g9xcc"] Nov 26 06:43:14 crc kubenswrapper[4871]: I1126 06:43:14.043326 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-utilities\") pod \"redhat-marketplace-g9xcc\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:14 crc kubenswrapper[4871]: I1126 06:43:14.043450 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-catalog-content\") pod \"redhat-marketplace-g9xcc\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:14 crc kubenswrapper[4871]: I1126 06:43:14.043563 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4wkl\" (UniqueName: \"kubernetes.io/projected/9c78fa55-3cbb-4f45-a833-b6977ed05494-kube-api-access-z4wkl\") pod \"redhat-marketplace-g9xcc\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:14 crc kubenswrapper[4871]: I1126 06:43:14.145157 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-utilities\") pod \"redhat-marketplace-g9xcc\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:14 crc kubenswrapper[4871]: I1126 06:43:14.145287 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-catalog-content\") pod \"redhat-marketplace-g9xcc\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:14 crc kubenswrapper[4871]: I1126 06:43:14.145376 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4wkl\" (UniqueName: \"kubernetes.io/projected/9c78fa55-3cbb-4f45-a833-b6977ed05494-kube-api-access-z4wkl\") pod \"redhat-marketplace-g9xcc\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:14 crc kubenswrapper[4871]: I1126 06:43:14.146132 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-utilities\") pod \"redhat-marketplace-g9xcc\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:14 crc kubenswrapper[4871]: I1126 06:43:14.146382 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-catalog-content\") pod \"redhat-marketplace-g9xcc\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:14 crc kubenswrapper[4871]: I1126 06:43:14.172330 4871 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4wkl\" (UniqueName: \"kubernetes.io/projected/9c78fa55-3cbb-4f45-a833-b6977ed05494-kube-api-access-z4wkl\") pod \"redhat-marketplace-g9xcc\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:14 crc kubenswrapper[4871]: I1126 06:43:14.209324 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:14 crc kubenswrapper[4871]: I1126 06:43:14.789844 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g9xcc"] Nov 26 06:43:14 crc kubenswrapper[4871]: W1126 06:43:14.794229 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c78fa55_3cbb_4f45_a833_b6977ed05494.slice/crio-b2539dd0f76eb57dcd5322f323b021642fdbc91e217845423d9eb2c2784e67a2 WatchSource:0}: Error finding container b2539dd0f76eb57dcd5322f323b021642fdbc91e217845423d9eb2c2784e67a2: Status 404 returned error can't find the container with id b2539dd0f76eb57dcd5322f323b021642fdbc91e217845423d9eb2c2784e67a2 Nov 26 06:43:15 crc kubenswrapper[4871]: I1126 06:43:15.400922 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g9xcc" event={"ID":"9c78fa55-3cbb-4f45-a833-b6977ed05494","Type":"ContainerDied","Data":"f1dfd5b0024cf0694c5591458e9ff0a696ebb9f5e1f050470da0e4fb5113bd14"} Nov 26 06:43:15 crc kubenswrapper[4871]: I1126 06:43:15.401998 4871 generic.go:334] "Generic (PLEG): container finished" podID="9c78fa55-3cbb-4f45-a833-b6977ed05494" containerID="f1dfd5b0024cf0694c5591458e9ff0a696ebb9f5e1f050470da0e4fb5113bd14" exitCode=0 Nov 26 06:43:15 crc kubenswrapper[4871]: I1126 06:43:15.402099 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g9xcc" event={"ID":"9c78fa55-3cbb-4f45-a833-b6977ed05494","Type":"ContainerStarted","Data":"b2539dd0f76eb57dcd5322f323b021642fdbc91e217845423d9eb2c2784e67a2"} Nov 26 06:43:17 crc kubenswrapper[4871]: I1126 06:43:17.420960 4871 generic.go:334] "Generic (PLEG): container finished" podID="9c78fa55-3cbb-4f45-a833-b6977ed05494" containerID="32e2094331b018a4f667cb05cee28d7da62d3c0b250dddcc5676374d7af0396a" exitCode=0 Nov 26 06:43:17 crc kubenswrapper[4871]: I1126 06:43:17.421030 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g9xcc" event={"ID":"9c78fa55-3cbb-4f45-a833-b6977ed05494","Type":"ContainerDied","Data":"32e2094331b018a4f667cb05cee28d7da62d3c0b250dddcc5676374d7af0396a"} Nov 26 06:43:18 crc kubenswrapper[4871]: I1126 06:43:18.432773 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g9xcc" event={"ID":"9c78fa55-3cbb-4f45-a833-b6977ed05494","Type":"ContainerStarted","Data":"15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197"} Nov 26 06:43:18 crc kubenswrapper[4871]: I1126 06:43:18.456870 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-g9xcc" podStartSLOduration=2.799029979 podStartE2EDuration="5.456849203s" podCreationTimestamp="2025-11-26 06:43:13 +0000 UTC" firstStartedPulling="2025-11-26 06:43:15.402481389 +0000 UTC m=+4653.585532975" lastFinishedPulling="2025-11-26 06:43:18.060300603 +0000 UTC m=+4656.243352199" observedRunningTime="2025-11-26 06:43:18.450803963 
+0000 UTC m=+4656.633855549" watchObservedRunningTime="2025-11-26 06:43:18.456849203 +0000 UTC m=+4656.639900789" Nov 26 06:43:24 crc kubenswrapper[4871]: I1126 06:43:24.209562 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:24 crc kubenswrapper[4871]: I1126 06:43:24.210080 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:24 crc kubenswrapper[4871]: I1126 06:43:24.259133 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:24 crc kubenswrapper[4871]: I1126 06:43:24.581394 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:24 crc kubenswrapper[4871]: I1126 06:43:24.667251 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g9xcc"] Nov 26 06:43:26 crc kubenswrapper[4871]: I1126 06:43:26.518122 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-g9xcc" podUID="9c78fa55-3cbb-4f45-a833-b6977ed05494" containerName="registry-server" containerID="cri-o://15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197" gracePeriod=2 Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:26.999902 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.114714 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4wkl\" (UniqueName: \"kubernetes.io/projected/9c78fa55-3cbb-4f45-a833-b6977ed05494-kube-api-access-z4wkl\") pod \"9c78fa55-3cbb-4f45-a833-b6977ed05494\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.115336 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-utilities\") pod \"9c78fa55-3cbb-4f45-a833-b6977ed05494\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.115463 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-catalog-content\") pod \"9c78fa55-3cbb-4f45-a833-b6977ed05494\" (UID: \"9c78fa55-3cbb-4f45-a833-b6977ed05494\") " Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.116316 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-utilities" (OuterVolumeSpecName: "utilities") pod "9c78fa55-3cbb-4f45-a833-b6977ed05494" (UID: "9c78fa55-3cbb-4f45-a833-b6977ed05494"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.117214 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.121303 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c78fa55-3cbb-4f45-a833-b6977ed05494-kube-api-access-z4wkl" (OuterVolumeSpecName: "kube-api-access-z4wkl") pod "9c78fa55-3cbb-4f45-a833-b6977ed05494" (UID: "9c78fa55-3cbb-4f45-a833-b6977ed05494"). InnerVolumeSpecName "kube-api-access-z4wkl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.132708 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9c78fa55-3cbb-4f45-a833-b6977ed05494" (UID: "9c78fa55-3cbb-4f45-a833-b6977ed05494"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.219663 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c78fa55-3cbb-4f45-a833-b6977ed05494-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.219714 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4wkl\" (UniqueName: \"kubernetes.io/projected/9c78fa55-3cbb-4f45-a833-b6977ed05494-kube-api-access-z4wkl\") on node \"crc\" DevicePath \"\"" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.542555 4871 generic.go:334] "Generic (PLEG): container finished" podID="9c78fa55-3cbb-4f45-a833-b6977ed05494" containerID="15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197" exitCode=0 Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.542665 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g9xcc" event={"ID":"9c78fa55-3cbb-4f45-a833-b6977ed05494","Type":"ContainerDied","Data":"15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197"} Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.542716 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g9xcc" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.542998 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g9xcc" event={"ID":"9c78fa55-3cbb-4f45-a833-b6977ed05494","Type":"ContainerDied","Data":"b2539dd0f76eb57dcd5322f323b021642fdbc91e217845423d9eb2c2784e67a2"} Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.543033 4871 scope.go:117] "RemoveContainer" containerID="15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.570153 4871 scope.go:117] "RemoveContainer" containerID="32e2094331b018a4f667cb05cee28d7da62d3c0b250dddcc5676374d7af0396a" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.592742 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g9xcc"] Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.604608 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-g9xcc"] Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.605295 4871 scope.go:117] "RemoveContainer" containerID="f1dfd5b0024cf0694c5591458e9ff0a696ebb9f5e1f050470da0e4fb5113bd14" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.648551 4871 scope.go:117] "RemoveContainer" containerID="15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197" Nov 26 06:43:27 crc kubenswrapper[4871]: E1126 06:43:27.648937 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197\": container with ID starting with 15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197 not found: ID does not exist" containerID="15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.648965 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197"} err="failed to get container status \"15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197\": rpc error: code = NotFound desc = could not find container \"15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197\": container with ID starting with 15beb4ef58b4c6c37162d79af2cf062329958d9a18b19b067fa2cc51d0663197 not found: ID does not exist" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.648987 4871 scope.go:117] "RemoveContainer" containerID="32e2094331b018a4f667cb05cee28d7da62d3c0b250dddcc5676374d7af0396a" Nov 26 06:43:27 crc kubenswrapper[4871]: E1126 06:43:27.649226 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32e2094331b018a4f667cb05cee28d7da62d3c0b250dddcc5676374d7af0396a\": container with ID starting with 32e2094331b018a4f667cb05cee28d7da62d3c0b250dddcc5676374d7af0396a not found: ID does not exist" containerID="32e2094331b018a4f667cb05cee28d7da62d3c0b250dddcc5676374d7af0396a" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.649251 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32e2094331b018a4f667cb05cee28d7da62d3c0b250dddcc5676374d7af0396a"} err="failed to get container status \"32e2094331b018a4f667cb05cee28d7da62d3c0b250dddcc5676374d7af0396a\": rpc error: code = NotFound desc = could not find 
container \"32e2094331b018a4f667cb05cee28d7da62d3c0b250dddcc5676374d7af0396a\": container with ID starting with 32e2094331b018a4f667cb05cee28d7da62d3c0b250dddcc5676374d7af0396a not found: ID does not exist" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.649269 4871 scope.go:117] "RemoveContainer" containerID="f1dfd5b0024cf0694c5591458e9ff0a696ebb9f5e1f050470da0e4fb5113bd14" Nov 26 06:43:27 crc kubenswrapper[4871]: E1126 06:43:27.649464 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1dfd5b0024cf0694c5591458e9ff0a696ebb9f5e1f050470da0e4fb5113bd14\": container with ID starting with f1dfd5b0024cf0694c5591458e9ff0a696ebb9f5e1f050470da0e4fb5113bd14 not found: ID does not exist" containerID="f1dfd5b0024cf0694c5591458e9ff0a696ebb9f5e1f050470da0e4fb5113bd14" Nov 26 06:43:27 crc kubenswrapper[4871]: I1126 06:43:27.649484 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1dfd5b0024cf0694c5591458e9ff0a696ebb9f5e1f050470da0e4fb5113bd14"} err="failed to get container status \"f1dfd5b0024cf0694c5591458e9ff0a696ebb9f5e1f050470da0e4fb5113bd14\": rpc error: code = NotFound desc = could not find container \"f1dfd5b0024cf0694c5591458e9ff0a696ebb9f5e1f050470da0e4fb5113bd14\": container with ID starting with f1dfd5b0024cf0694c5591458e9ff0a696ebb9f5e1f050470da0e4fb5113bd14 not found: ID does not exist" Nov 26 06:43:28 crc kubenswrapper[4871]: I1126 06:43:28.523891 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c78fa55-3cbb-4f45-a833-b6977ed05494" path="/var/lib/kubelet/pods/9c78fa55-3cbb-4f45-a833-b6977ed05494/volumes" Nov 26 06:44:07 crc kubenswrapper[4871]: E1126 06:44:07.289189 4871 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.44:35838->38.102.83.44:38809: read tcp 38.102.83.44:35838->38.102.83.44:38809: read: connection reset by peer Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.300975 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q9wrd"] Nov 26 06:44:53 crc kubenswrapper[4871]: E1126 06:44:53.302639 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c78fa55-3cbb-4f45-a833-b6977ed05494" containerName="extract-content" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.302674 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c78fa55-3cbb-4f45-a833-b6977ed05494" containerName="extract-content" Nov 26 06:44:53 crc kubenswrapper[4871]: E1126 06:44:53.302753 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c78fa55-3cbb-4f45-a833-b6977ed05494" containerName="extract-utilities" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.302771 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c78fa55-3cbb-4f45-a833-b6977ed05494" containerName="extract-utilities" Nov 26 06:44:53 crc kubenswrapper[4871]: E1126 06:44:53.302826 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c78fa55-3cbb-4f45-a833-b6977ed05494" containerName="registry-server" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.302844 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c78fa55-3cbb-4f45-a833-b6977ed05494" containerName="registry-server" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.303398 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c78fa55-3cbb-4f45-a833-b6977ed05494" containerName="registry-server" Nov 26 06:44:53 crc 
kubenswrapper[4871]: I1126 06:44:53.307169 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.323082 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q9wrd"] Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.364280 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-utilities\") pod \"redhat-operators-q9wrd\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.364363 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-catalog-content\") pod \"redhat-operators-q9wrd\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.364675 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtx8n\" (UniqueName: \"kubernetes.io/projected/6d2bc83e-4828-46e5-813b-9adc3edee081-kube-api-access-jtx8n\") pod \"redhat-operators-q9wrd\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.466709 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtx8n\" (UniqueName: \"kubernetes.io/projected/6d2bc83e-4828-46e5-813b-9adc3edee081-kube-api-access-jtx8n\") pod \"redhat-operators-q9wrd\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.466859 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-utilities\") pod \"redhat-operators-q9wrd\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.466897 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-catalog-content\") pod \"redhat-operators-q9wrd\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.467362 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-utilities\") pod \"redhat-operators-q9wrd\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:44:53 crc kubenswrapper[4871]: I1126 06:44:53.467407 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-catalog-content\") pod \"redhat-operators-q9wrd\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:44:54 crc kubenswrapper[4871]: I1126 
06:44:54.058905 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtx8n\" (UniqueName: \"kubernetes.io/projected/6d2bc83e-4828-46e5-813b-9adc3edee081-kube-api-access-jtx8n\") pod \"redhat-operators-q9wrd\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:44:54 crc kubenswrapper[4871]: I1126 06:44:54.243336 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:44:54 crc kubenswrapper[4871]: I1126 06:44:54.763707 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q9wrd"] Nov 26 06:44:55 crc kubenswrapper[4871]: I1126 06:44:55.450348 4871 generic.go:334] "Generic (PLEG): container finished" podID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerID="bdd621d291ad32a1aacaadd4e312aee9cd2945f927e9af666fd844fca0275e04" exitCode=0 Nov 26 06:44:55 crc kubenswrapper[4871]: I1126 06:44:55.450542 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q9wrd" event={"ID":"6d2bc83e-4828-46e5-813b-9adc3edee081","Type":"ContainerDied","Data":"bdd621d291ad32a1aacaadd4e312aee9cd2945f927e9af666fd844fca0275e04"} Nov 26 06:44:55 crc kubenswrapper[4871]: I1126 06:44:55.451043 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q9wrd" event={"ID":"6d2bc83e-4828-46e5-813b-9adc3edee081","Type":"ContainerStarted","Data":"235a2208699cf984397fdc6ee159f1068d0a0ff1922d64483661554bad941ad3"} Nov 26 06:44:57 crc kubenswrapper[4871]: I1126 06:44:57.471976 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q9wrd" event={"ID":"6d2bc83e-4828-46e5-813b-9adc3edee081","Type":"ContainerStarted","Data":"f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03"} Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.167094 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq"] Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.169935 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.172767 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.173289 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.176834 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq"] Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.228207 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4kfk\" (UniqueName: \"kubernetes.io/projected/729854f9-cb41-4a5e-90c1-361eb0de32f7-kube-api-access-d4kfk\") pod \"collect-profiles-29402325-g58rq\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.228314 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/729854f9-cb41-4a5e-90c1-361eb0de32f7-config-volume\") pod \"collect-profiles-29402325-g58rq\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.228740 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/729854f9-cb41-4a5e-90c1-361eb0de32f7-secret-volume\") pod \"collect-profiles-29402325-g58rq\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.330514 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4kfk\" (UniqueName: \"kubernetes.io/projected/729854f9-cb41-4a5e-90c1-361eb0de32f7-kube-api-access-d4kfk\") pod \"collect-profiles-29402325-g58rq\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.330640 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/729854f9-cb41-4a5e-90c1-361eb0de32f7-config-volume\") pod \"collect-profiles-29402325-g58rq\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.330736 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/729854f9-cb41-4a5e-90c1-361eb0de32f7-secret-volume\") pod \"collect-profiles-29402325-g58rq\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.331819 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/729854f9-cb41-4a5e-90c1-361eb0de32f7-config-volume\") pod 
\"collect-profiles-29402325-g58rq\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.336504 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/729854f9-cb41-4a5e-90c1-361eb0de32f7-secret-volume\") pod \"collect-profiles-29402325-g58rq\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.350653 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4kfk\" (UniqueName: \"kubernetes.io/projected/729854f9-cb41-4a5e-90c1-361eb0de32f7-kube-api-access-d4kfk\") pod \"collect-profiles-29402325-g58rq\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.492825 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:00 crc kubenswrapper[4871]: I1126 06:45:00.992382 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq"] Nov 26 06:45:01 crc kubenswrapper[4871]: I1126 06:45:01.511901 4871 generic.go:334] "Generic (PLEG): container finished" podID="729854f9-cb41-4a5e-90c1-361eb0de32f7" containerID="24a8f1a96b3ede7691b20f93a147c632f84272f7629c88fb5450a83f406e6a0b" exitCode=0 Nov 26 06:45:01 crc kubenswrapper[4871]: I1126 06:45:01.511950 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" event={"ID":"729854f9-cb41-4a5e-90c1-361eb0de32f7","Type":"ContainerDied","Data":"24a8f1a96b3ede7691b20f93a147c632f84272f7629c88fb5450a83f406e6a0b"} Nov 26 06:45:01 crc kubenswrapper[4871]: I1126 06:45:01.512017 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" event={"ID":"729854f9-cb41-4a5e-90c1-361eb0de32f7","Type":"ContainerStarted","Data":"94a9ae72700efbd68f743deb6432ab9c8970fb70a888fd0c41a4190877f1f5fa"} Nov 26 06:45:01 crc kubenswrapper[4871]: I1126 06:45:01.514451 4871 generic.go:334] "Generic (PLEG): container finished" podID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerID="f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03" exitCode=0 Nov 26 06:45:01 crc kubenswrapper[4871]: I1126 06:45:01.514489 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q9wrd" event={"ID":"6d2bc83e-4828-46e5-813b-9adc3edee081","Type":"ContainerDied","Data":"f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03"} Nov 26 06:45:02 crc kubenswrapper[4871]: I1126 06:45:02.552883 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q9wrd" event={"ID":"6d2bc83e-4828-46e5-813b-9adc3edee081","Type":"ContainerStarted","Data":"8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343"} Nov 26 06:45:02 crc kubenswrapper[4871]: I1126 06:45:02.582689 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q9wrd" podStartSLOduration=3.083532634 podStartE2EDuration="9.58267082s" podCreationTimestamp="2025-11-26 06:44:53 +0000 UTC" 
firstStartedPulling="2025-11-26 06:44:55.453303799 +0000 UTC m=+4753.636355385" lastFinishedPulling="2025-11-26 06:45:01.952441985 +0000 UTC m=+4760.135493571" observedRunningTime="2025-11-26 06:45:02.572631752 +0000 UTC m=+4760.755683348" watchObservedRunningTime="2025-11-26 06:45:02.58267082 +0000 UTC m=+4760.765722436" Nov 26 06:45:02 crc kubenswrapper[4871]: I1126 06:45:02.911678 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:02 crc kubenswrapper[4871]: I1126 06:45:02.987766 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/729854f9-cb41-4a5e-90c1-361eb0de32f7-secret-volume\") pod \"729854f9-cb41-4a5e-90c1-361eb0de32f7\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " Nov 26 06:45:02 crc kubenswrapper[4871]: I1126 06:45:02.988153 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/729854f9-cb41-4a5e-90c1-361eb0de32f7-config-volume\") pod \"729854f9-cb41-4a5e-90c1-361eb0de32f7\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " Nov 26 06:45:02 crc kubenswrapper[4871]: I1126 06:45:02.988217 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4kfk\" (UniqueName: \"kubernetes.io/projected/729854f9-cb41-4a5e-90c1-361eb0de32f7-kube-api-access-d4kfk\") pod \"729854f9-cb41-4a5e-90c1-361eb0de32f7\" (UID: \"729854f9-cb41-4a5e-90c1-361eb0de32f7\") " Nov 26 06:45:02 crc kubenswrapper[4871]: I1126 06:45:02.988758 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/729854f9-cb41-4a5e-90c1-361eb0de32f7-config-volume" (OuterVolumeSpecName: "config-volume") pod "729854f9-cb41-4a5e-90c1-361eb0de32f7" (UID: "729854f9-cb41-4a5e-90c1-361eb0de32f7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 06:45:02 crc kubenswrapper[4871]: I1126 06:45:02.994697 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/729854f9-cb41-4a5e-90c1-361eb0de32f7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "729854f9-cb41-4a5e-90c1-361eb0de32f7" (UID: "729854f9-cb41-4a5e-90c1-361eb0de32f7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 06:45:02 crc kubenswrapper[4871]: I1126 06:45:02.994797 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/729854f9-cb41-4a5e-90c1-361eb0de32f7-kube-api-access-d4kfk" (OuterVolumeSpecName: "kube-api-access-d4kfk") pod "729854f9-cb41-4a5e-90c1-361eb0de32f7" (UID: "729854f9-cb41-4a5e-90c1-361eb0de32f7"). InnerVolumeSpecName "kube-api-access-d4kfk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:45:03 crc kubenswrapper[4871]: I1126 06:45:03.090591 4871 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/729854f9-cb41-4a5e-90c1-361eb0de32f7-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:45:03 crc kubenswrapper[4871]: I1126 06:45:03.090621 4871 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/729854f9-cb41-4a5e-90c1-361eb0de32f7-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 06:45:03 crc kubenswrapper[4871]: I1126 06:45:03.090631 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4kfk\" (UniqueName: \"kubernetes.io/projected/729854f9-cb41-4a5e-90c1-361eb0de32f7-kube-api-access-d4kfk\") on node \"crc\" DevicePath \"\"" Nov 26 06:45:03 crc kubenswrapper[4871]: I1126 06:45:03.566125 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" event={"ID":"729854f9-cb41-4a5e-90c1-361eb0de32f7","Type":"ContainerDied","Data":"94a9ae72700efbd68f743deb6432ab9c8970fb70a888fd0c41a4190877f1f5fa"} Nov 26 06:45:03 crc kubenswrapper[4871]: I1126 06:45:03.566164 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94a9ae72700efbd68f743deb6432ab9c8970fb70a888fd0c41a4190877f1f5fa" Nov 26 06:45:03 crc kubenswrapper[4871]: I1126 06:45:03.566217 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402325-g58rq" Nov 26 06:45:03 crc kubenswrapper[4871]: I1126 06:45:03.999188 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc"] Nov 26 06:45:04 crc kubenswrapper[4871]: I1126 06:45:04.008883 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402280-gnfvc"] Nov 26 06:45:04 crc kubenswrapper[4871]: I1126 06:45:04.243760 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:45:04 crc kubenswrapper[4871]: I1126 06:45:04.244012 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:45:04 crc kubenswrapper[4871]: I1126 06:45:04.521415 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b032092f-123d-4532-8193-05c7afe3011d" path="/var/lib/kubelet/pods/b032092f-123d-4532-8193-05c7afe3011d/volumes" Nov 26 06:45:05 crc kubenswrapper[4871]: I1126 06:45:05.289603 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-q9wrd" podUID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerName="registry-server" probeResult="failure" output=< Nov 26 06:45:05 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s Nov 26 06:45:05 crc kubenswrapper[4871]: > Nov 26 06:45:15 crc kubenswrapper[4871]: I1126 06:45:15.297453 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-q9wrd" podUID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerName="registry-server" probeResult="failure" output=< Nov 26 06:45:15 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s Nov 26 06:45:15 crc kubenswrapper[4871]: > Nov 26 06:45:23 crc kubenswrapper[4871]: I1126 06:45:23.614850 
4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:45:23 crc kubenswrapper[4871]: I1126 06:45:23.616599 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:45:24 crc kubenswrapper[4871]: I1126 06:45:24.320958 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:45:24 crc kubenswrapper[4871]: I1126 06:45:24.372072 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:45:24 crc kubenswrapper[4871]: I1126 06:45:24.411624 4871 scope.go:117] "RemoveContainer" containerID="ea430ac90e595af09c8ffb9a0c1286ce1636bce501c829a7b68af6accad36bd1" Nov 26 06:45:24 crc kubenswrapper[4871]: I1126 06:45:24.558266 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q9wrd"] Nov 26 06:45:26 crc kubenswrapper[4871]: I1126 06:45:26.088974 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-q9wrd" podUID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerName="registry-server" containerID="cri-o://8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343" gracePeriod=2 Nov 26 06:45:26 crc kubenswrapper[4871]: I1126 06:45:26.589079 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:45:26 crc kubenswrapper[4871]: I1126 06:45:26.751621 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-catalog-content\") pod \"6d2bc83e-4828-46e5-813b-9adc3edee081\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " Nov 26 06:45:26 crc kubenswrapper[4871]: I1126 06:45:26.751897 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-utilities\") pod \"6d2bc83e-4828-46e5-813b-9adc3edee081\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " Nov 26 06:45:26 crc kubenswrapper[4871]: I1126 06:45:26.752012 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtx8n\" (UniqueName: \"kubernetes.io/projected/6d2bc83e-4828-46e5-813b-9adc3edee081-kube-api-access-jtx8n\") pod \"6d2bc83e-4828-46e5-813b-9adc3edee081\" (UID: \"6d2bc83e-4828-46e5-813b-9adc3edee081\") " Nov 26 06:45:26 crc kubenswrapper[4871]: I1126 06:45:26.752383 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-utilities" (OuterVolumeSpecName: "utilities") pod "6d2bc83e-4828-46e5-813b-9adc3edee081" (UID: "6d2bc83e-4828-46e5-813b-9adc3edee081"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:45:26 crc kubenswrapper[4871]: I1126 06:45:26.752632 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:45:26 crc kubenswrapper[4871]: I1126 06:45:26.758717 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d2bc83e-4828-46e5-813b-9adc3edee081-kube-api-access-jtx8n" (OuterVolumeSpecName: "kube-api-access-jtx8n") pod "6d2bc83e-4828-46e5-813b-9adc3edee081" (UID: "6d2bc83e-4828-46e5-813b-9adc3edee081"). InnerVolumeSpecName "kube-api-access-jtx8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:45:26 crc kubenswrapper[4871]: I1126 06:45:26.854761 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtx8n\" (UniqueName: \"kubernetes.io/projected/6d2bc83e-4828-46e5-813b-9adc3edee081-kube-api-access-jtx8n\") on node \"crc\" DevicePath \"\"" Nov 26 06:45:26 crc kubenswrapper[4871]: I1126 06:45:26.859280 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6d2bc83e-4828-46e5-813b-9adc3edee081" (UID: "6d2bc83e-4828-46e5-813b-9adc3edee081"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:45:26 crc kubenswrapper[4871]: I1126 06:45:26.956536 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d2bc83e-4828-46e5-813b-9adc3edee081-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.104039 4871 generic.go:334] "Generic (PLEG): container finished" podID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerID="8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343" exitCode=0 Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.104080 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q9wrd" event={"ID":"6d2bc83e-4828-46e5-813b-9adc3edee081","Type":"ContainerDied","Data":"8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343"} Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.104104 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q9wrd" event={"ID":"6d2bc83e-4828-46e5-813b-9adc3edee081","Type":"ContainerDied","Data":"235a2208699cf984397fdc6ee159f1068d0a0ff1922d64483661554bad941ad3"} Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.104121 4871 scope.go:117] "RemoveContainer" containerID="8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343" Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.104251 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-q9wrd" Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.144810 4871 scope.go:117] "RemoveContainer" containerID="f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03" Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.152405 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q9wrd"] Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.161572 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-q9wrd"] Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.382819 4871 scope.go:117] "RemoveContainer" containerID="bdd621d291ad32a1aacaadd4e312aee9cd2945f927e9af666fd844fca0275e04" Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.420960 4871 scope.go:117] "RemoveContainer" containerID="8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343" Nov 26 06:45:27 crc kubenswrapper[4871]: E1126 06:45:27.421623 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343\": container with ID starting with 8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343 not found: ID does not exist" containerID="8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343" Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.421674 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343"} err="failed to get container status \"8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343\": rpc error: code = NotFound desc = could not find container \"8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343\": container with ID starting with 8203449a7c3430a569f1da49bff4fd930640c0754802c524f91e6af3ac8fe343 not found: ID does not exist" Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.421732 4871 scope.go:117] "RemoveContainer" containerID="f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03" Nov 26 06:45:27 crc kubenswrapper[4871]: E1126 06:45:27.422272 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03\": container with ID starting with f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03 not found: ID does not exist" containerID="f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03" Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.422303 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03"} err="failed to get container status \"f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03\": rpc error: code = NotFound desc = could not find container \"f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03\": container with ID starting with f74e29f2f4476b81b1bb774ef2ad910799cb1aa524735b23da67a9b240a90b03 not found: ID does not exist" Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.422322 4871 scope.go:117] "RemoveContainer" containerID="bdd621d291ad32a1aacaadd4e312aee9cd2945f927e9af666fd844fca0275e04" Nov 26 06:45:27 crc kubenswrapper[4871]: E1126 06:45:27.422672 4871 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"bdd621d291ad32a1aacaadd4e312aee9cd2945f927e9af666fd844fca0275e04\": container with ID starting with bdd621d291ad32a1aacaadd4e312aee9cd2945f927e9af666fd844fca0275e04 not found: ID does not exist" containerID="bdd621d291ad32a1aacaadd4e312aee9cd2945f927e9af666fd844fca0275e04" Nov 26 06:45:27 crc kubenswrapper[4871]: I1126 06:45:27.422719 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdd621d291ad32a1aacaadd4e312aee9cd2945f927e9af666fd844fca0275e04"} err="failed to get container status \"bdd621d291ad32a1aacaadd4e312aee9cd2945f927e9af666fd844fca0275e04\": rpc error: code = NotFound desc = could not find container \"bdd621d291ad32a1aacaadd4e312aee9cd2945f927e9af666fd844fca0275e04\": container with ID starting with bdd621d291ad32a1aacaadd4e312aee9cd2945f927e9af666fd844fca0275e04 not found: ID does not exist" Nov 26 06:45:28 crc kubenswrapper[4871]: I1126 06:45:28.527190 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d2bc83e-4828-46e5-813b-9adc3edee081" path="/var/lib/kubelet/pods/6d2bc83e-4828-46e5-813b-9adc3edee081/volumes" Nov 26 06:45:53 crc kubenswrapper[4871]: I1126 06:45:53.614351 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:45:53 crc kubenswrapper[4871]: I1126 06:45:53.614785 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:46:23 crc kubenswrapper[4871]: I1126 06:46:23.615114 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 06:46:23 crc kubenswrapper[4871]: I1126 06:46:23.615556 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 06:46:23 crc kubenswrapper[4871]: I1126 06:46:23.615605 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 06:46:23 crc kubenswrapper[4871]: I1126 06:46:23.616363 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9c988cadcd1c7db807d7f1faa554de6ce79af88aeccc2c41477f1c97e057816d"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 06:46:23 crc kubenswrapper[4871]: I1126 06:46:23.616417 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" 
podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://9c988cadcd1c7db807d7f1faa554de6ce79af88aeccc2c41477f1c97e057816d" gracePeriod=600 Nov 26 06:46:24 crc kubenswrapper[4871]: I1126 06:46:24.741849 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="9c988cadcd1c7db807d7f1faa554de6ce79af88aeccc2c41477f1c97e057816d" exitCode=0 Nov 26 06:46:24 crc kubenswrapper[4871]: I1126 06:46:24.741939 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"9c988cadcd1c7db807d7f1faa554de6ce79af88aeccc2c41477f1c97e057816d"} Nov 26 06:46:24 crc kubenswrapper[4871]: I1126 06:46:24.742507 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"} Nov 26 06:46:24 crc kubenswrapper[4871]: I1126 06:46:24.742597 4871 scope.go:117] "RemoveContainer" containerID="2d8c81c0e8f8d4b35d119bf5d63b76cc737090c041112a634851e6f02a3af3bd" Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.348665 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kjcbj"] Nov 26 06:47:34 crc kubenswrapper[4871]: E1126 06:47:34.349675 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="729854f9-cb41-4a5e-90c1-361eb0de32f7" containerName="collect-profiles" Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.349694 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="729854f9-cb41-4a5e-90c1-361eb0de32f7" containerName="collect-profiles" Nov 26 06:47:34 crc kubenswrapper[4871]: E1126 06:47:34.349738 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerName="extract-content" Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.349747 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerName="extract-content" Nov 26 06:47:34 crc kubenswrapper[4871]: E1126 06:47:34.349764 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerName="extract-utilities" Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.349772 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerName="extract-utilities" Nov 26 06:47:34 crc kubenswrapper[4871]: E1126 06:47:34.349793 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerName="registry-server" Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.349801 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerName="registry-server" Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.350021 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d2bc83e-4828-46e5-813b-9adc3edee081" containerName="registry-server" Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.350071 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="729854f9-cb41-4a5e-90c1-361eb0de32f7" containerName="collect-profiles" Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.352089 4871 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.363165 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kjcbj"]
Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.438966 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-catalog-content\") pod \"community-operators-kjcbj\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") " pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.439160 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9dhz\" (UniqueName: \"kubernetes.io/projected/9cdf42cd-1acb-4e73-a162-055680011437-kube-api-access-p9dhz\") pod \"community-operators-kjcbj\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") " pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.439217 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-utilities\") pod \"community-operators-kjcbj\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") " pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.542312 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9dhz\" (UniqueName: \"kubernetes.io/projected/9cdf42cd-1acb-4e73-a162-055680011437-kube-api-access-p9dhz\") pod \"community-operators-kjcbj\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") " pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.542423 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-utilities\") pod \"community-operators-kjcbj\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") " pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.542543 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-catalog-content\") pod \"community-operators-kjcbj\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") " pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.543174 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-catalog-content\") pod \"community-operators-kjcbj\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") " pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.543278 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-utilities\") pod \"community-operators-kjcbj\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") " pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.566378 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9dhz\" (UniqueName: \"kubernetes.io/projected/9cdf42cd-1acb-4e73-a162-055680011437-kube-api-access-p9dhz\") pod \"community-operators-kjcbj\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") " pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:34 crc kubenswrapper[4871]: I1126 06:47:34.678201 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:35 crc kubenswrapper[4871]: I1126 06:47:35.246094 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kjcbj"]
Nov 26 06:47:35 crc kubenswrapper[4871]: I1126 06:47:35.466196 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjcbj" event={"ID":"9cdf42cd-1acb-4e73-a162-055680011437","Type":"ContainerStarted","Data":"8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348"}
Nov 26 06:47:35 crc kubenswrapper[4871]: I1126 06:47:35.466615 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjcbj" event={"ID":"9cdf42cd-1acb-4e73-a162-055680011437","Type":"ContainerStarted","Data":"7a6101a2686233f65439db8a655699b23d7a5eb6fb3e246f4e801ada66e8cafa"}
Nov 26 06:47:36 crc kubenswrapper[4871]: I1126 06:47:36.480682 4871 generic.go:334] "Generic (PLEG): container finished" podID="9cdf42cd-1acb-4e73-a162-055680011437" containerID="8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348" exitCode=0
Nov 26 06:47:36 crc kubenswrapper[4871]: I1126 06:47:36.480777 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjcbj" event={"ID":"9cdf42cd-1acb-4e73-a162-055680011437","Type":"ContainerDied","Data":"8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348"}
Nov 26 06:47:37 crc kubenswrapper[4871]: I1126 06:47:37.496025 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjcbj" event={"ID":"9cdf42cd-1acb-4e73-a162-055680011437","Type":"ContainerStarted","Data":"b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54"}
Nov 26 06:47:38 crc kubenswrapper[4871]: I1126 06:47:38.509825 4871 generic.go:334] "Generic (PLEG): container finished" podID="9cdf42cd-1acb-4e73-a162-055680011437" containerID="b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54" exitCode=0
Nov 26 06:47:38 crc kubenswrapper[4871]: I1126 06:47:38.527397 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjcbj" event={"ID":"9cdf42cd-1acb-4e73-a162-055680011437","Type":"ContainerDied","Data":"b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54"}
Nov 26 06:47:39 crc kubenswrapper[4871]: I1126 06:47:39.521747 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjcbj" event={"ID":"9cdf42cd-1acb-4e73-a162-055680011437","Type":"ContainerStarted","Data":"42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498"}
Nov 26 06:47:39 crc kubenswrapper[4871]: I1126 06:47:39.540188 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kjcbj" podStartSLOduration=3.114664979 podStartE2EDuration="5.540170902s" podCreationTimestamp="2025-11-26 06:47:34 +0000 UTC" firstStartedPulling="2025-11-26 06:47:36.483942681 +0000 UTC m=+4914.666994267" lastFinishedPulling="2025-11-26 06:47:38.909448564 +0000 UTC m=+4917.092500190" observedRunningTime="2025-11-26 06:47:39.538358177 +0000 UTC m=+4917.721409793" watchObservedRunningTime="2025-11-26 06:47:39.540170902 +0000 UTC m=+4917.723222488"
Nov 26 06:47:44 crc kubenswrapper[4871]: I1126 06:47:44.678449 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:44 crc kubenswrapper[4871]: I1126 06:47:44.679637 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:44 crc kubenswrapper[4871]: I1126 06:47:44.733864 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:45 crc kubenswrapper[4871]: I1126 06:47:45.649233 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:45 crc kubenswrapper[4871]: I1126 06:47:45.697604 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kjcbj"]
Nov 26 06:47:47 crc kubenswrapper[4871]: I1126 06:47:47.624328 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kjcbj" podUID="9cdf42cd-1acb-4e73-a162-055680011437" containerName="registry-server" containerID="cri-o://42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498" gracePeriod=2
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.206128 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.318020 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-utilities\") pod \"9cdf42cd-1acb-4e73-a162-055680011437\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") "
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.318310 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9dhz\" (UniqueName: \"kubernetes.io/projected/9cdf42cd-1acb-4e73-a162-055680011437-kube-api-access-p9dhz\") pod \"9cdf42cd-1acb-4e73-a162-055680011437\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") "
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.318542 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-catalog-content\") pod \"9cdf42cd-1acb-4e73-a162-055680011437\" (UID: \"9cdf42cd-1acb-4e73-a162-055680011437\") "
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.321277 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-utilities" (OuterVolumeSpecName: "utilities") pod "9cdf42cd-1acb-4e73-a162-055680011437" (UID: "9cdf42cd-1acb-4e73-a162-055680011437"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.333201 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cdf42cd-1acb-4e73-a162-055680011437-kube-api-access-p9dhz" (OuterVolumeSpecName: "kube-api-access-p9dhz") pod "9cdf42cd-1acb-4e73-a162-055680011437" (UID: "9cdf42cd-1acb-4e73-a162-055680011437"). InnerVolumeSpecName "kube-api-access-p9dhz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.370928 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9cdf42cd-1acb-4e73-a162-055680011437" (UID: "9cdf42cd-1acb-4e73-a162-055680011437"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.420579 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.420617 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cdf42cd-1acb-4e73-a162-055680011437-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.420631 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9dhz\" (UniqueName: \"kubernetes.io/projected/9cdf42cd-1acb-4e73-a162-055680011437-kube-api-access-p9dhz\") on node \"crc\" DevicePath \"\""
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.637303 4871 generic.go:334] "Generic (PLEG): container finished" podID="9cdf42cd-1acb-4e73-a162-055680011437" containerID="42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498" exitCode=0
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.637342 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjcbj" event={"ID":"9cdf42cd-1acb-4e73-a162-055680011437","Type":"ContainerDied","Data":"42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498"}
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.637366 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kjcbj" event={"ID":"9cdf42cd-1acb-4e73-a162-055680011437","Type":"ContainerDied","Data":"7a6101a2686233f65439db8a655699b23d7a5eb6fb3e246f4e801ada66e8cafa"}
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.637382 4871 scope.go:117] "RemoveContainer" containerID="42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498"
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.637419 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kjcbj"
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.669724 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kjcbj"]
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.671500 4871 scope.go:117] "RemoveContainer" containerID="b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54"
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.681484 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kjcbj"]
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.718077 4871 scope.go:117] "RemoveContainer" containerID="8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348"
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.754236 4871 scope.go:117] "RemoveContainer" containerID="42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498"
Nov 26 06:47:48 crc kubenswrapper[4871]: E1126 06:47:48.754680 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498\": container with ID starting with 42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498 not found: ID does not exist" containerID="42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498"
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.754766 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498"} err="failed to get container status \"42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498\": rpc error: code = NotFound desc = could not find container \"42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498\": container with ID starting with 42e29c7d7718a12bff325807da4091fe2c0be3b354e2a49acba84278ee0c6498 not found: ID does not exist"
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.754845 4871 scope.go:117] "RemoveContainer" containerID="b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54"
Nov 26 06:47:48 crc kubenswrapper[4871]: E1126 06:47:48.755151 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54\": container with ID starting with b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54 not found: ID does not exist" containerID="b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54"
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.755193 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54"} err="failed to get container status \"b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54\": rpc error: code = NotFound desc = could not find container \"b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54\": container with ID starting with b7990eadef8f271297f6c547ac596b961e463a6bb17d4505ad578d4d7b14af54 not found: ID does not exist"
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.755222 4871 scope.go:117] "RemoveContainer" containerID="8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348"
Nov 26 06:47:48 crc kubenswrapper[4871]: E1126 06:47:48.755667 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348\": container with ID starting with 8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348 not found: ID does not exist" containerID="8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348"
Nov 26 06:47:48 crc kubenswrapper[4871]: I1126 06:47:48.755732 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348"} err="failed to get container status \"8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348\": rpc error: code = NotFound desc = could not find container \"8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348\": container with ID starting with 8005ff27bdaae2ef18091a6f73ce0c55ae73102e004d8ba05abc96f4ff777348 not found: ID does not exist"
Nov 26 06:47:50 crc kubenswrapper[4871]: I1126 06:47:50.524511 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cdf42cd-1acb-4e73-a162-055680011437" path="/var/lib/kubelet/pods/9cdf42cd-1acb-4e73-a162-055680011437/volumes"
Nov 26 06:48:23 crc kubenswrapper[4871]: I1126 06:48:23.615667 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:48:23 crc kubenswrapper[4871]: I1126 06:48:23.616255 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:48:53 crc kubenswrapper[4871]: I1126 06:48:53.615721 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:48:53 crc kubenswrapper[4871]: I1126 06:48:53.616311 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:49:23 crc kubenswrapper[4871]: I1126 06:49:23.036925 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-78dd8485c9-fx6sv" podUID="fcca2594-c385-49cd-8354-7e4fcfab96c8" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502"
Nov 26 06:49:23 crc kubenswrapper[4871]: I1126 06:49:23.615282 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:49:23 crc kubenswrapper[4871]: I1126 06:49:23.615345 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:49:23 crc kubenswrapper[4871]: I1126 06:49:23.615397 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2"
Nov 26 06:49:23 crc kubenswrapper[4871]: I1126 06:49:23.616223 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 06:49:23 crc kubenswrapper[4871]: I1126 06:49:23.616286 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41" gracePeriod=600
Nov 26 06:49:23 crc kubenswrapper[4871]: E1126 06:49:23.988718 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:49:24 crc kubenswrapper[4871]: I1126 06:49:24.741197 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41" exitCode=0
Nov 26 06:49:24 crc kubenswrapper[4871]: I1126 06:49:24.741291 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"}
Nov 26 06:49:24 crc kubenswrapper[4871]: I1126 06:49:24.743591 4871 scope.go:117] "RemoveContainer" containerID="9c988cadcd1c7db807d7f1faa554de6ce79af88aeccc2c41477f1c97e057816d"
Nov 26 06:49:24 crc kubenswrapper[4871]: I1126 06:49:24.744606 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:49:24 crc kubenswrapper[4871]: E1126 06:49:24.745133 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:49:39 crc kubenswrapper[4871]: I1126 06:49:39.507495 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:49:39 crc kubenswrapper[4871]: E1126 06:49:39.508269 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:49:50 crc kubenswrapper[4871]: I1126 06:49:50.507199 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:49:50 crc kubenswrapper[4871]: E1126 06:49:50.508907 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:50:02 crc kubenswrapper[4871]: I1126 06:50:02.521932 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:50:02 crc kubenswrapper[4871]: E1126 06:50:02.523263 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:50:16 crc kubenswrapper[4871]: I1126 06:50:16.507373 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:50:16 crc kubenswrapper[4871]: E1126 06:50:16.508372 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:50:29 crc kubenswrapper[4871]: I1126 06:50:29.507236 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:50:29 crc kubenswrapper[4871]: E1126 06:50:29.507997 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:50:36 crc kubenswrapper[4871]: I1126 06:50:36.037128 4871 generic.go:334] "Generic (PLEG): container finished" podID="d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" containerID="159b5a19cf451f34170e6c1567c3a6d1852b7e29a209a845a19d80e6e00262e6" exitCode=0
Nov 26 06:50:36 crc kubenswrapper[4871]: I1126 06:50:36.037232 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc","Type":"ContainerDied","Data":"159b5a19cf451f34170e6c1567c3a6d1852b7e29a209a845a19d80e6e00262e6"}
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.473371 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.576135 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") "
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.576193 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-config-data\") pod \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") "
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.576292 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config\") pod \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") "
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.576326 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8h2ql\" (UniqueName: \"kubernetes.io/projected/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-kube-api-access-8h2ql\") pod \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") "
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.576353 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-workdir\") pod \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") "
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.576408 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-temporary\") pod \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") "
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.576453 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ssh-key\") pod \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") "
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.576648 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config-secret\") pod \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") "
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.576689 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ca-certs\") pod \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\" (UID: \"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc\") "
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.577029 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" (UID: "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.577168 4871 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.577754 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-config-data" (OuterVolumeSpecName: "config-data") pod "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" (UID: "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.581441 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" (UID: "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.587650 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "test-operator-logs") pod "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" (UID: "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.590715 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-kube-api-access-8h2ql" (OuterVolumeSpecName: "kube-api-access-8h2ql") pod "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" (UID: "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc"). InnerVolumeSpecName "kube-api-access-8h2ql". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.608451 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" (UID: "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.610501 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" (UID: "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.611839 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" (UID: "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.633330 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" (UID: "d4d1d560-ed1f-4b35-bde2-53c83e6ddabc"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.679227 4871 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.679294 4871 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ca-certs\") on node \"crc\" DevicePath \"\""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.679361 4871 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" "
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.679377 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.679390 4871 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.684473 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8h2ql\" (UniqueName: \"kubernetes.io/projected/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-kube-api-access-8h2ql\") on node \"crc\" DevicePath \"\""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.684501 4871 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.684518 4871 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d4d1d560-ed1f-4b35-bde2-53c83e6ddabc-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.713336 4871 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc"
Nov 26 06:50:37 crc kubenswrapper[4871]: I1126 06:50:37.786837 4871 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\""
Nov 26 06:50:38 crc kubenswrapper[4871]: I1126 06:50:38.062042 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"d4d1d560-ed1f-4b35-bde2-53c83e6ddabc","Type":"ContainerDied","Data":"4d4cd0821e4fa8f6cf7636bbd9216f34ca4af89d31d511cca4163a4ca558336d"}
Nov 26 06:50:38 crc kubenswrapper[4871]: I1126 06:50:38.062081 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d4cd0821e4fa8f6cf7636bbd9216f34ca4af89d31d511cca4163a4ca558336d"
Nov 26 06:50:38 crc kubenswrapper[4871]: I1126 06:50:38.062090 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 26 06:50:44 crc kubenswrapper[4871]: I1126 06:50:44.507598 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:50:44 crc kubenswrapper[4871]: E1126 06:50:44.508568 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.647368 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 26 06:50:46 crc kubenswrapper[4871]: E1126 06:50:46.648749 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cdf42cd-1acb-4e73-a162-055680011437" containerName="extract-utilities"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.648840 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdf42cd-1acb-4e73-a162-055680011437" containerName="extract-utilities"
Nov 26 06:50:46 crc kubenswrapper[4871]: E1126 06:50:46.648931 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cdf42cd-1acb-4e73-a162-055680011437" containerName="registry-server"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.648993 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdf42cd-1acb-4e73-a162-055680011437" containerName="registry-server"
Nov 26 06:50:46 crc kubenswrapper[4871]: E1126 06:50:46.649077 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cdf42cd-1acb-4e73-a162-055680011437" containerName="extract-content"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.649138 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cdf42cd-1acb-4e73-a162-055680011437" containerName="extract-content"
Nov 26 06:50:46 crc kubenswrapper[4871]: E1126 06:50:46.649217 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" containerName="tempest-tests-tempest-tests-runner"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.649273 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" containerName="tempest-tests-tempest-tests-runner"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.649516 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cdf42cd-1acb-4e73-a162-055680011437" containerName="registry-server"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.649623 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4d1d560-ed1f-4b35-bde2-53c83e6ddabc" containerName="tempest-tests-tempest-tests-runner"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.650398 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.652094 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-lgbwn"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.673490 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.802733 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtklx\" (UniqueName: \"kubernetes.io/projected/69802496-881e-4259-a45b-a75b1434b79d-kube-api-access-xtklx\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"69802496-881e-4259-a45b-a75b1434b79d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.802930 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"69802496-881e-4259-a45b-a75b1434b79d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.904337 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtklx\" (UniqueName: \"kubernetes.io/projected/69802496-881e-4259-a45b-a75b1434b79d-kube-api-access-xtklx\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"69802496-881e-4259-a45b-a75b1434b79d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.904549 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"69802496-881e-4259-a45b-a75b1434b79d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.905015 4871 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"69802496-881e-4259-a45b-a75b1434b79d\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.940353 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtklx\" (UniqueName: \"kubernetes.io/projected/69802496-881e-4259-a45b-a75b1434b79d-kube-api-access-xtklx\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"69802496-881e-4259-a45b-a75b1434b79d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.943860 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"69802496-881e-4259-a45b-a75b1434b79d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 06:50:46 crc kubenswrapper[4871]: I1126 06:50:46.987008 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 26 06:50:47 crc kubenswrapper[4871]: I1126 06:50:47.480418 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 26 06:50:47 crc kubenswrapper[4871]: I1126 06:50:47.516300 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 06:50:48 crc kubenswrapper[4871]: I1126 06:50:48.170707 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"69802496-881e-4259-a45b-a75b1434b79d","Type":"ContainerStarted","Data":"7bfd946e5aa355bf6167b6eb5460e3d9ec74faafbde5b1dd66e7cf001b0434ab"}
Nov 26 06:50:49 crc kubenswrapper[4871]: I1126 06:50:49.186910 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"69802496-881e-4259-a45b-a75b1434b79d","Type":"ContainerStarted","Data":"ae6b6731f8a1a0693f9e4fb371fb378dfe0b2cf78b1071e42fe612fcd65a7bd9"}
Nov 26 06:50:49 crc kubenswrapper[4871]: I1126 06:50:49.215057 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.392267606 podStartE2EDuration="3.215028208s" podCreationTimestamp="2025-11-26 06:50:46 +0000 UTC" firstStartedPulling="2025-11-26 06:50:47.516035551 +0000 UTC m=+5105.699087137" lastFinishedPulling="2025-11-26 06:50:48.338796153 +0000 UTC m=+5106.521847739" observedRunningTime="2025-11-26 06:50:49.204617181 +0000 UTC m=+5107.387668777" watchObservedRunningTime="2025-11-26 06:50:49.215028208 +0000 UTC m=+5107.398079834"
Nov 26 06:50:57 crc kubenswrapper[4871]: I1126 06:50:57.509724 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:50:57 crc kubenswrapper[4871]: E1126 06:50:57.510442 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:51:08 crc kubenswrapper[4871]: I1126 06:51:08.507982 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:51:08 crc kubenswrapper[4871]: E1126 06:51:08.509033 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.480172 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sl4tf/must-gather-wnbsz"]
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.482280 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/must-gather-wnbsz"
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.484690 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-sl4tf"/"openshift-service-ca.crt"
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.484704 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-sl4tf"/"default-dockercfg-cwf42"
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.485497 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-sl4tf"/"kube-root-ca.crt"
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.493187 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-sl4tf/must-gather-wnbsz"]
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.637729 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8xzm\" (UniqueName: \"kubernetes.io/projected/c7797f7c-aee3-49b3-accf-2072decd1ed1-kube-api-access-s8xzm\") pod \"must-gather-wnbsz\" (UID: \"c7797f7c-aee3-49b3-accf-2072decd1ed1\") " pod="openshift-must-gather-sl4tf/must-gather-wnbsz"
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.638018 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c7797f7c-aee3-49b3-accf-2072decd1ed1-must-gather-output\") pod \"must-gather-wnbsz\" (UID: \"c7797f7c-aee3-49b3-accf-2072decd1ed1\") " pod="openshift-must-gather-sl4tf/must-gather-wnbsz"
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.739862 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c7797f7c-aee3-49b3-accf-2072decd1ed1-must-gather-output\") pod \"must-gather-wnbsz\" (UID: \"c7797f7c-aee3-49b3-accf-2072decd1ed1\") " pod="openshift-must-gather-sl4tf/must-gather-wnbsz"
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.739976 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8xzm\" (UniqueName: \"kubernetes.io/projected/c7797f7c-aee3-49b3-accf-2072decd1ed1-kube-api-access-s8xzm\") pod \"must-gather-wnbsz\" (UID: \"c7797f7c-aee3-49b3-accf-2072decd1ed1\") " pod="openshift-must-gather-sl4tf/must-gather-wnbsz"
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.740493 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c7797f7c-aee3-49b3-accf-2072decd1ed1-must-gather-output\") pod \"must-gather-wnbsz\" (UID: \"c7797f7c-aee3-49b3-accf-2072decd1ed1\") " pod="openshift-must-gather-sl4tf/must-gather-wnbsz"
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.765101 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8xzm\" (UniqueName: \"kubernetes.io/projected/c7797f7c-aee3-49b3-accf-2072decd1ed1-kube-api-access-s8xzm\") pod \"must-gather-wnbsz\" (UID: \"c7797f7c-aee3-49b3-accf-2072decd1ed1\") " pod="openshift-must-gather-sl4tf/must-gather-wnbsz"
Nov 26 06:51:12 crc kubenswrapper[4871]: I1126 06:51:12.798102 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/must-gather-wnbsz"
Nov 26 06:51:13 crc kubenswrapper[4871]: I1126 06:51:13.298731 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-sl4tf/must-gather-wnbsz"]
Nov 26 06:51:13 crc kubenswrapper[4871]: I1126 06:51:13.445364 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/must-gather-wnbsz" event={"ID":"c7797f7c-aee3-49b3-accf-2072decd1ed1","Type":"ContainerStarted","Data":"352d9bc4e8b5f1f2321bdfeaa6ab5f7cce73ccba48069475656e0624ee89ba63"}
Nov 26 06:51:20 crc kubenswrapper[4871]: I1126 06:51:20.535335 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/must-gather-wnbsz" event={"ID":"c7797f7c-aee3-49b3-accf-2072decd1ed1","Type":"ContainerStarted","Data":"894a2408e86126a76b6b2935a9031f4afd993a26282a630ced7e525ffba2092e"}
Nov 26 06:51:20 crc kubenswrapper[4871]: I1126 06:51:20.535881 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/must-gather-wnbsz" event={"ID":"c7797f7c-aee3-49b3-accf-2072decd1ed1","Type":"ContainerStarted","Data":"ce54abd0c2bf5403069ccaa99941137d4f4502ce8d652e9e69223f714471e1cb"}
Nov 26 06:51:20 crc kubenswrapper[4871]: I1126 06:51:20.553444 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-sl4tf/must-gather-wnbsz" podStartSLOduration=2.344635847 podStartE2EDuration="8.553418784s" podCreationTimestamp="2025-11-26 06:51:12 +0000 UTC" firstStartedPulling="2025-11-26 06:51:13.315097476 +0000 UTC m=+5131.498149052" lastFinishedPulling="2025-11-26 06:51:19.523880393 +0000 UTC m=+5137.706931989" observedRunningTime="2025-11-26 06:51:20.547491577 +0000 UTC m=+5138.730543173" watchObservedRunningTime="2025-11-26 06:51:20.553418784 +0000 UTC m=+5138.736470370"
Nov 26 06:51:23 crc kubenswrapper[4871]: I1126 06:51:23.507960 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:51:23 crc kubenswrapper[4871]: E1126 06:51:23.509691 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:51:24 crc kubenswrapper[4871]: I1126 06:51:24.024323 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sl4tf/crc-debug-6dkp7"]
Nov 26 06:51:24 crc kubenswrapper[4871]: I1126 06:51:24.025914 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-6dkp7"
Nov 26 06:51:24 crc kubenswrapper[4871]: I1126 06:51:24.086021 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/205f3583-c36f-43ff-8752-230204424898-host\") pod \"crc-debug-6dkp7\" (UID: \"205f3583-c36f-43ff-8752-230204424898\") " pod="openshift-must-gather-sl4tf/crc-debug-6dkp7"
Nov 26 06:51:24 crc kubenswrapper[4871]: I1126 06:51:24.086320 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngcw4\" (UniqueName: \"kubernetes.io/projected/205f3583-c36f-43ff-8752-230204424898-kube-api-access-ngcw4\") pod \"crc-debug-6dkp7\" (UID: \"205f3583-c36f-43ff-8752-230204424898\") " pod="openshift-must-gather-sl4tf/crc-debug-6dkp7"
Nov 26 06:51:24 crc kubenswrapper[4871]: I1126 06:51:24.187910 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/205f3583-c36f-43ff-8752-230204424898-host\") pod \"crc-debug-6dkp7\" (UID: \"205f3583-c36f-43ff-8752-230204424898\") " pod="openshift-must-gather-sl4tf/crc-debug-6dkp7"
Nov 26 06:51:24 crc kubenswrapper[4871]: I1126 06:51:24.187973 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngcw4\" (UniqueName: \"kubernetes.io/projected/205f3583-c36f-43ff-8752-230204424898-kube-api-access-ngcw4\") pod \"crc-debug-6dkp7\" (UID: \"205f3583-c36f-43ff-8752-230204424898\") " pod="openshift-must-gather-sl4tf/crc-debug-6dkp7"
Nov 26 06:51:24 crc kubenswrapper[4871]: I1126 06:51:24.188540 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/205f3583-c36f-43ff-8752-230204424898-host\") pod \"crc-debug-6dkp7\" (UID: \"205f3583-c36f-43ff-8752-230204424898\") " pod="openshift-must-gather-sl4tf/crc-debug-6dkp7"
Nov 26 06:51:24 crc kubenswrapper[4871]: I1126 06:51:24.211130 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngcw4\" (UniqueName: \"kubernetes.io/projected/205f3583-c36f-43ff-8752-230204424898-kube-api-access-ngcw4\") pod \"crc-debug-6dkp7\" (UID: \"205f3583-c36f-43ff-8752-230204424898\") " pod="openshift-must-gather-sl4tf/crc-debug-6dkp7"
Nov 26 06:51:24 crc kubenswrapper[4871]: I1126 06:51:24.363251 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-6dkp7"
Nov 26 06:51:24 crc kubenswrapper[4871]: I1126 06:51:24.582819 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/crc-debug-6dkp7" event={"ID":"205f3583-c36f-43ff-8752-230204424898","Type":"ContainerStarted","Data":"518f21409e5f94c93612b08b793ad188e46fc74f03ff099a869cbb7b993b1095"}
Nov 26 06:51:35 crc kubenswrapper[4871]: I1126 06:51:35.708467 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/crc-debug-6dkp7" event={"ID":"205f3583-c36f-43ff-8752-230204424898","Type":"ContainerStarted","Data":"16f2f3501edaea771b64e764710a3b4423763a5151232aa4a1214be1732b9977"}
Nov 26 06:51:35 crc kubenswrapper[4871]: I1126 06:51:35.726866 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-sl4tf/crc-debug-6dkp7" podStartSLOduration=2.051023167 podStartE2EDuration="11.726846354s" podCreationTimestamp="2025-11-26 06:51:24 +0000 UTC" firstStartedPulling="2025-11-26 06:51:24.437543495 +0000 UTC m=+5142.620595081" lastFinishedPulling="2025-11-26 06:51:34.113366682 +0000 UTC m=+5152.296418268" observedRunningTime="2025-11-26 06:51:35.720818025 +0000 UTC m=+5153.903869611" watchObservedRunningTime="2025-11-26 06:51:35.726846354 +0000 UTC m=+5153.909897940"
Nov 26 06:51:36 crc kubenswrapper[4871]: I1126 06:51:36.507067 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:51:36 crc kubenswrapper[4871]: E1126 06:51:36.507682 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:51:51 crc kubenswrapper[4871]: I1126 06:51:51.507068 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:51:51 crc kubenswrapper[4871]: E1126 06:51:51.507752 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:52:02 crc kubenswrapper[4871]: I1126 06:52:02.513668 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:52:02 crc kubenswrapper[4871]: E1126 06:52:02.514192 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:52:17 crc kubenswrapper[4871]: I1126 06:52:17.507869 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:52:17 crc kubenswrapper[4871]: E1126 06:52:17.508924 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:52:27 crc kubenswrapper[4871]: I1126 06:52:27.186950 4871 generic.go:334] "Generic (PLEG): container finished" podID="205f3583-c36f-43ff-8752-230204424898" containerID="16f2f3501edaea771b64e764710a3b4423763a5151232aa4a1214be1732b9977" exitCode=0
Nov 26 06:52:27 crc kubenswrapper[4871]: I1126 06:52:27.187020 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/crc-debug-6dkp7" event={"ID":"205f3583-c36f-43ff-8752-230204424898","Type":"ContainerDied","Data":"16f2f3501edaea771b64e764710a3b4423763a5151232aa4a1214be1732b9977"}
Nov 26 06:52:28 crc kubenswrapper[4871]: I1126 06:52:28.328985 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-6dkp7"
Nov 26 06:52:28 crc kubenswrapper[4871]: I1126 06:52:28.367350 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sl4tf/crc-debug-6dkp7"]
Nov 26 06:52:28 crc kubenswrapper[4871]: I1126 06:52:28.382828 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sl4tf/crc-debug-6dkp7"]
Nov 26 06:52:28 crc kubenswrapper[4871]: I1126 06:52:28.436907 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngcw4\" (UniqueName: \"kubernetes.io/projected/205f3583-c36f-43ff-8752-230204424898-kube-api-access-ngcw4\") pod \"205f3583-c36f-43ff-8752-230204424898\" (UID: \"205f3583-c36f-43ff-8752-230204424898\") "
Nov 26 06:52:28 crc kubenswrapper[4871]: I1126 06:52:28.437235 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/205f3583-c36f-43ff-8752-230204424898-host\") pod \"205f3583-c36f-43ff-8752-230204424898\" (UID: \"205f3583-c36f-43ff-8752-230204424898\") "
Nov 26 06:52:28 crc kubenswrapper[4871]: I1126 06:52:28.437330 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/205f3583-c36f-43ff-8752-230204424898-host" (OuterVolumeSpecName: "host") pod "205f3583-c36f-43ff-8752-230204424898" (UID: "205f3583-c36f-43ff-8752-230204424898"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 06:52:28 crc kubenswrapper[4871]: I1126 06:52:28.437961 4871 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/205f3583-c36f-43ff-8752-230204424898-host\") on node \"crc\" DevicePath \"\""
Nov 26 06:52:28 crc kubenswrapper[4871]: I1126 06:52:28.444795 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/205f3583-c36f-43ff-8752-230204424898-kube-api-access-ngcw4" (OuterVolumeSpecName: "kube-api-access-ngcw4") pod "205f3583-c36f-43ff-8752-230204424898" (UID: "205f3583-c36f-43ff-8752-230204424898"). InnerVolumeSpecName "kube-api-access-ngcw4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:52:28 crc kubenswrapper[4871]: I1126 06:52:28.507152 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:52:28 crc kubenswrapper[4871]: E1126 06:52:28.507517 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:52:28 crc kubenswrapper[4871]: I1126 06:52:28.522297 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="205f3583-c36f-43ff-8752-230204424898" path="/var/lib/kubelet/pods/205f3583-c36f-43ff-8752-230204424898/volumes"
Nov 26 06:52:28 crc kubenswrapper[4871]: I1126 06:52:28.541053 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngcw4\" (UniqueName: \"kubernetes.io/projected/205f3583-c36f-43ff-8752-230204424898-kube-api-access-ngcw4\") on node \"crc\" DevicePath \"\""
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.209970 4871 scope.go:117] "RemoveContainer" containerID="16f2f3501edaea771b64e764710a3b4423763a5151232aa4a1214be1732b9977"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.210008 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-6dkp7"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.585190 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sl4tf/crc-debug-4th4x"]
Nov 26 06:52:29 crc kubenswrapper[4871]: E1126 06:52:29.586206 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="205f3583-c36f-43ff-8752-230204424898" containerName="container-00"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.586220 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="205f3583-c36f-43ff-8752-230204424898" containerName="container-00"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.586484 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="205f3583-c36f-43ff-8752-230204424898" containerName="container-00"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.587170 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-4th4x"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.663558 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/508dbeba-cc4f-411c-8633-0a0962b2577b-host\") pod \"crc-debug-4th4x\" (UID: \"508dbeba-cc4f-411c-8633-0a0962b2577b\") " pod="openshift-must-gather-sl4tf/crc-debug-4th4x"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.663654 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xb7w7\" (UniqueName: \"kubernetes.io/projected/508dbeba-cc4f-411c-8633-0a0962b2577b-kube-api-access-xb7w7\") pod \"crc-debug-4th4x\" (UID: \"508dbeba-cc4f-411c-8633-0a0962b2577b\") " pod="openshift-must-gather-sl4tf/crc-debug-4th4x"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.765441 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xb7w7\" (UniqueName: \"kubernetes.io/projected/508dbeba-cc4f-411c-8633-0a0962b2577b-kube-api-access-xb7w7\") pod \"crc-debug-4th4x\" (UID: \"508dbeba-cc4f-411c-8633-0a0962b2577b\") " pod="openshift-must-gather-sl4tf/crc-debug-4th4x"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.765614 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/508dbeba-cc4f-411c-8633-0a0962b2577b-host\") pod \"crc-debug-4th4x\" (UID: \"508dbeba-cc4f-411c-8633-0a0962b2577b\") " pod="openshift-must-gather-sl4tf/crc-debug-4th4x"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.765700 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/508dbeba-cc4f-411c-8633-0a0962b2577b-host\") pod \"crc-debug-4th4x\" (UID: \"508dbeba-cc4f-411c-8633-0a0962b2577b\") " pod="openshift-must-gather-sl4tf/crc-debug-4th4x"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.785433 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xb7w7\" (UniqueName: \"kubernetes.io/projected/508dbeba-cc4f-411c-8633-0a0962b2577b-kube-api-access-xb7w7\") pod \"crc-debug-4th4x\" (UID: \"508dbeba-cc4f-411c-8633-0a0962b2577b\") " pod="openshift-must-gather-sl4tf/crc-debug-4th4x"
Nov 26 06:52:29 crc kubenswrapper[4871]: I1126 06:52:29.944186 4871 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-4th4x" Nov 26 06:52:30 crc kubenswrapper[4871]: I1126 06:52:30.222290 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/crc-debug-4th4x" event={"ID":"508dbeba-cc4f-411c-8633-0a0962b2577b","Type":"ContainerStarted","Data":"cfaf619691a907279ef5f65726978a6840920f77a2fb7b9867c3918ed012febf"} Nov 26 06:52:30 crc kubenswrapper[4871]: I1126 06:52:30.222684 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/crc-debug-4th4x" event={"ID":"508dbeba-cc4f-411c-8633-0a0962b2577b","Type":"ContainerStarted","Data":"8343d7a5862bf59b17c1b581298b254a757d11bc4d307d706d5006afdaa7bccd"} Nov 26 06:52:30 crc kubenswrapper[4871]: I1126 06:52:30.246080 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-sl4tf/crc-debug-4th4x" podStartSLOduration=1.246057577 podStartE2EDuration="1.246057577s" podCreationTimestamp="2025-11-26 06:52:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 06:52:30.234087291 +0000 UTC m=+5208.417138877" watchObservedRunningTime="2025-11-26 06:52:30.246057577 +0000 UTC m=+5208.429109163" Nov 26 06:52:31 crc kubenswrapper[4871]: I1126 06:52:31.235839 4871 generic.go:334] "Generic (PLEG): container finished" podID="508dbeba-cc4f-411c-8633-0a0962b2577b" containerID="cfaf619691a907279ef5f65726978a6840920f77a2fb7b9867c3918ed012febf" exitCode=0 Nov 26 06:52:31 crc kubenswrapper[4871]: I1126 06:52:31.235918 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/crc-debug-4th4x" event={"ID":"508dbeba-cc4f-411c-8633-0a0962b2577b","Type":"ContainerDied","Data":"cfaf619691a907279ef5f65726978a6840920f77a2fb7b9867c3918ed012febf"} Nov 26 06:52:32 crc kubenswrapper[4871]: I1126 06:52:32.375051 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-4th4x" Nov 26 06:52:32 crc kubenswrapper[4871]: I1126 06:52:32.413497 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/508dbeba-cc4f-411c-8633-0a0962b2577b-host\") pod \"508dbeba-cc4f-411c-8633-0a0962b2577b\" (UID: \"508dbeba-cc4f-411c-8633-0a0962b2577b\") " Nov 26 06:52:32 crc kubenswrapper[4871]: I1126 06:52:32.413614 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xb7w7\" (UniqueName: \"kubernetes.io/projected/508dbeba-cc4f-411c-8633-0a0962b2577b-kube-api-access-xb7w7\") pod \"508dbeba-cc4f-411c-8633-0a0962b2577b\" (UID: \"508dbeba-cc4f-411c-8633-0a0962b2577b\") " Nov 26 06:52:32 crc kubenswrapper[4871]: I1126 06:52:32.413612 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/508dbeba-cc4f-411c-8633-0a0962b2577b-host" (OuterVolumeSpecName: "host") pod "508dbeba-cc4f-411c-8633-0a0962b2577b" (UID: "508dbeba-cc4f-411c-8633-0a0962b2577b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:52:32 crc kubenswrapper[4871]: I1126 06:52:32.418916 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/508dbeba-cc4f-411c-8633-0a0962b2577b-kube-api-access-xb7w7" (OuterVolumeSpecName: "kube-api-access-xb7w7") pod "508dbeba-cc4f-411c-8633-0a0962b2577b" (UID: "508dbeba-cc4f-411c-8633-0a0962b2577b"). 
InnerVolumeSpecName "kube-api-access-xb7w7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:52:32 crc kubenswrapper[4871]: I1126 06:52:32.518786 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xb7w7\" (UniqueName: \"kubernetes.io/projected/508dbeba-cc4f-411c-8633-0a0962b2577b-kube-api-access-xb7w7\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:32 crc kubenswrapper[4871]: I1126 06:52:32.518820 4871 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/508dbeba-cc4f-411c-8633-0a0962b2577b-host\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:32 crc kubenswrapper[4871]: I1126 06:52:32.987949 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sl4tf/crc-debug-4th4x"] Nov 26 06:52:33 crc kubenswrapper[4871]: I1126 06:52:33.004131 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sl4tf/crc-debug-4th4x"] Nov 26 06:52:33 crc kubenswrapper[4871]: I1126 06:52:33.257707 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8343d7a5862bf59b17c1b581298b254a757d11bc4d307d706d5006afdaa7bccd" Nov 26 06:52:33 crc kubenswrapper[4871]: I1126 06:52:33.257756 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-4th4x" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.160776 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-sl4tf/crc-debug-qm625"] Nov 26 06:52:34 crc kubenswrapper[4871]: E1126 06:52:34.162685 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="508dbeba-cc4f-411c-8633-0a0962b2577b" containerName="container-00" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.162770 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="508dbeba-cc4f-411c-8633-0a0962b2577b" containerName="container-00" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.163912 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="508dbeba-cc4f-411c-8633-0a0962b2577b" containerName="container-00" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.165841 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-qm625" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.248201 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhk5v\" (UniqueName: \"kubernetes.io/projected/6598c03e-810f-428b-8f60-ee7fc1187c78-kube-api-access-qhk5v\") pod \"crc-debug-qm625\" (UID: \"6598c03e-810f-428b-8f60-ee7fc1187c78\") " pod="openshift-must-gather-sl4tf/crc-debug-qm625" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.248276 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6598c03e-810f-428b-8f60-ee7fc1187c78-host\") pod \"crc-debug-qm625\" (UID: \"6598c03e-810f-428b-8f60-ee7fc1187c78\") " pod="openshift-must-gather-sl4tf/crc-debug-qm625" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.350410 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhk5v\" (UniqueName: \"kubernetes.io/projected/6598c03e-810f-428b-8f60-ee7fc1187c78-kube-api-access-qhk5v\") pod \"crc-debug-qm625\" (UID: \"6598c03e-810f-428b-8f60-ee7fc1187c78\") " pod="openshift-must-gather-sl4tf/crc-debug-qm625" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.350506 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6598c03e-810f-428b-8f60-ee7fc1187c78-host\") pod \"crc-debug-qm625\" (UID: \"6598c03e-810f-428b-8f60-ee7fc1187c78\") " pod="openshift-must-gather-sl4tf/crc-debug-qm625" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.350749 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6598c03e-810f-428b-8f60-ee7fc1187c78-host\") pod \"crc-debug-qm625\" (UID: \"6598c03e-810f-428b-8f60-ee7fc1187c78\") " pod="openshift-must-gather-sl4tf/crc-debug-qm625" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.380951 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhk5v\" (UniqueName: \"kubernetes.io/projected/6598c03e-810f-428b-8f60-ee7fc1187c78-kube-api-access-qhk5v\") pod \"crc-debug-qm625\" (UID: \"6598c03e-810f-428b-8f60-ee7fc1187c78\") " pod="openshift-must-gather-sl4tf/crc-debug-qm625" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.491780 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-qm625" Nov 26 06:52:34 crc kubenswrapper[4871]: I1126 06:52:34.518627 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="508dbeba-cc4f-411c-8633-0a0962b2577b" path="/var/lib/kubelet/pods/508dbeba-cc4f-411c-8633-0a0962b2577b/volumes" Nov 26 06:52:34 crc kubenswrapper[4871]: W1126 06:52:34.528417 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6598c03e_810f_428b_8f60_ee7fc1187c78.slice/crio-1c4c98c3fd631ffd494ebc917b009bf37a2759f3f609051419b25d549668d18c WatchSource:0}: Error finding container 1c4c98c3fd631ffd494ebc917b009bf37a2759f3f609051419b25d549668d18c: Status 404 returned error can't find the container with id 1c4c98c3fd631ffd494ebc917b009bf37a2759f3f609051419b25d549668d18c Nov 26 06:52:35 crc kubenswrapper[4871]: I1126 06:52:35.284563 4871 generic.go:334] "Generic (PLEG): container finished" podID="6598c03e-810f-428b-8f60-ee7fc1187c78" containerID="b96e16b5a47db132db842d0121aac5f8432a6a133af11183ac81e5da499ac645" exitCode=0 Nov 26 06:52:35 crc kubenswrapper[4871]: I1126 06:52:35.284669 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/crc-debug-qm625" event={"ID":"6598c03e-810f-428b-8f60-ee7fc1187c78","Type":"ContainerDied","Data":"b96e16b5a47db132db842d0121aac5f8432a6a133af11183ac81e5da499ac645"} Nov 26 06:52:35 crc kubenswrapper[4871]: I1126 06:52:35.284952 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/crc-debug-qm625" event={"ID":"6598c03e-810f-428b-8f60-ee7fc1187c78","Type":"ContainerStarted","Data":"1c4c98c3fd631ffd494ebc917b009bf37a2759f3f609051419b25d549668d18c"} Nov 26 06:52:35 crc kubenswrapper[4871]: I1126 06:52:35.336501 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sl4tf/crc-debug-qm625"] Nov 26 06:52:35 crc kubenswrapper[4871]: I1126 06:52:35.350763 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sl4tf/crc-debug-qm625"] Nov 26 06:52:36 crc kubenswrapper[4871]: I1126 06:52:36.415346 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-qm625" Nov 26 06:52:36 crc kubenswrapper[4871]: I1126 06:52:36.495679 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhk5v\" (UniqueName: \"kubernetes.io/projected/6598c03e-810f-428b-8f60-ee7fc1187c78-kube-api-access-qhk5v\") pod \"6598c03e-810f-428b-8f60-ee7fc1187c78\" (UID: \"6598c03e-810f-428b-8f60-ee7fc1187c78\") " Nov 26 06:52:36 crc kubenswrapper[4871]: I1126 06:52:36.495968 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6598c03e-810f-428b-8f60-ee7fc1187c78-host\") pod \"6598c03e-810f-428b-8f60-ee7fc1187c78\" (UID: \"6598c03e-810f-428b-8f60-ee7fc1187c78\") " Nov 26 06:52:36 crc kubenswrapper[4871]: I1126 06:52:36.496077 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6598c03e-810f-428b-8f60-ee7fc1187c78-host" (OuterVolumeSpecName: "host") pod "6598c03e-810f-428b-8f60-ee7fc1187c78" (UID: "6598c03e-810f-428b-8f60-ee7fc1187c78"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 06:52:36 crc kubenswrapper[4871]: I1126 06:52:36.496404 4871 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6598c03e-810f-428b-8f60-ee7fc1187c78-host\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:36 crc kubenswrapper[4871]: I1126 06:52:36.502223 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6598c03e-810f-428b-8f60-ee7fc1187c78-kube-api-access-qhk5v" (OuterVolumeSpecName: "kube-api-access-qhk5v") pod "6598c03e-810f-428b-8f60-ee7fc1187c78" (UID: "6598c03e-810f-428b-8f60-ee7fc1187c78"). InnerVolumeSpecName "kube-api-access-qhk5v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:52:36 crc kubenswrapper[4871]: I1126 06:52:36.520262 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6598c03e-810f-428b-8f60-ee7fc1187c78" path="/var/lib/kubelet/pods/6598c03e-810f-428b-8f60-ee7fc1187c78/volumes" Nov 26 06:52:36 crc kubenswrapper[4871]: I1126 06:52:36.598767 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhk5v\" (UniqueName: \"kubernetes.io/projected/6598c03e-810f-428b-8f60-ee7fc1187c78-kube-api-access-qhk5v\") on node \"crc\" DevicePath \"\"" Nov 26 06:52:37 crc kubenswrapper[4871]: I1126 06:52:37.341762 4871 scope.go:117] "RemoveContainer" containerID="b96e16b5a47db132db842d0121aac5f8432a6a133af11183ac81e5da499ac645" Nov 26 06:52:37 crc kubenswrapper[4871]: I1126 06:52:37.341918 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/crc-debug-qm625" Nov 26 06:52:43 crc kubenswrapper[4871]: I1126 06:52:43.509109 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41" Nov 26 06:52:43 crc kubenswrapper[4871]: E1126 06:52:43.509853 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:52:57 crc kubenswrapper[4871]: I1126 06:52:57.508566 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41" Nov 26 06:52:57 crc kubenswrapper[4871]: E1126 06:52:57.509295 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:53:02 crc kubenswrapper[4871]: I1126 06:53:02.531833 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6b4f49568b-znxq7_421fd2e9-5378-4cd9-89c0-523f89b8fea6/barbican-api-log/0.log" Nov 26 06:53:02 crc kubenswrapper[4871]: I1126 06:53:02.539645 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6b4f49568b-znxq7_421fd2e9-5378-4cd9-89c0-523f89b8fea6/barbican-api/0.log" Nov 26 06:53:02 crc kubenswrapper[4871]: I1126 06:53:02.770311 4871 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-ff77984c8-tthxz_2d0d2e04-05e3-4ace-8b11-0d6317e7ed80/barbican-keystone-listener/0.log" Nov 26 06:53:02 crc kubenswrapper[4871]: I1126 06:53:02.784174 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-ff77984c8-tthxz_2d0d2e04-05e3-4ace-8b11-0d6317e7ed80/barbican-keystone-listener-log/0.log" Nov 26 06:53:02 crc kubenswrapper[4871]: I1126 06:53:02.836889 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-574cf75679-xcbqs_19019851-fc4d-41ff-ba88-f347dc3305a2/barbican-worker/0.log" Nov 26 06:53:02 crc kubenswrapper[4871]: I1126 06:53:02.970716 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-574cf75679-xcbqs_19019851-fc4d-41ff-ba88-f347dc3305a2/barbican-worker-log/0.log" Nov 26 06:53:03 crc kubenswrapper[4871]: I1126 06:53:03.071415 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq_a811292e-f231-48cd-98b5-4acd21f945ed/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:03 crc kubenswrapper[4871]: I1126 06:53:03.223164 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ffa88fa-bd91-473e-8d4e-44fc61235b3d/ceilometer-central-agent/0.log" Nov 26 06:53:03 crc kubenswrapper[4871]: I1126 06:53:03.259792 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ffa88fa-bd91-473e-8d4e-44fc61235b3d/ceilometer-notification-agent/0.log" Nov 26 06:53:03 crc kubenswrapper[4871]: I1126 06:53:03.272370 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ffa88fa-bd91-473e-8d4e-44fc61235b3d/proxy-httpd/0.log" Nov 26 06:53:03 crc kubenswrapper[4871]: I1126 06:53:03.341665 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ffa88fa-bd91-473e-8d4e-44fc61235b3d/sg-core/0.log" Nov 26 06:53:03 crc kubenswrapper[4871]: I1126 06:53:03.482927 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bb2121e7-904c-4de4-a336-0ed681cd9be9/cinder-api-log/0.log" Nov 26 06:53:03 crc kubenswrapper[4871]: I1126 06:53:03.627911 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bb2121e7-904c-4de4-a336-0ed681cd9be9/cinder-api/0.log" Nov 26 06:53:03 crc kubenswrapper[4871]: I1126 06:53:03.691716 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_adddc22d-b976-4931-8dde-359f0952b438/cinder-scheduler/0.log" Nov 26 06:53:03 crc kubenswrapper[4871]: I1126 06:53:03.745073 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_adddc22d-b976-4931-8dde-359f0952b438/probe/0.log" Nov 26 06:53:03 crc kubenswrapper[4871]: I1126 06:53:03.863192 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl_ff26f53b-8fe4-4dde-b475-348beb78046d/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:03 crc kubenswrapper[4871]: I1126 06:53:03.972918 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt_46746b5b-e35a-452a-bdad-12b497a8c3b0/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:04 crc kubenswrapper[4871]: I1126 06:53:04.087925 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_dnsmasq-dns-77b58f4b85-prlhs_7ea434ed-7152-4539-9589-d743e9d5b6c5/init/0.log" Nov 26 06:53:04 crc kubenswrapper[4871]: I1126 06:53:04.240960 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-77b58f4b85-prlhs_7ea434ed-7152-4539-9589-d743e9d5b6c5/init/0.log" Nov 26 06:53:04 crc kubenswrapper[4871]: I1126 06:53:04.309248 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt_e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:04 crc kubenswrapper[4871]: I1126 06:53:04.423650 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-77b58f4b85-prlhs_7ea434ed-7152-4539-9589-d743e9d5b6c5/dnsmasq-dns/0.log" Nov 26 06:53:04 crc kubenswrapper[4871]: I1126 06:53:04.499960 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_206a6ff7-c300-42b8-9816-a272aacc0d94/glance-httpd/0.log" Nov 26 06:53:04 crc kubenswrapper[4871]: I1126 06:53:04.527821 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_206a6ff7-c300-42b8-9816-a272aacc0d94/glance-log/0.log" Nov 26 06:53:04 crc kubenswrapper[4871]: I1126 06:53:04.689287 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_a199844c-c13e-47ce-8980-b3292e3435b3/glance-log/0.log" Nov 26 06:53:04 crc kubenswrapper[4871]: I1126 06:53:04.731379 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_a199844c-c13e-47ce-8980-b3292e3435b3/glance-httpd/0.log" Nov 26 06:53:04 crc kubenswrapper[4871]: I1126 06:53:04.959872 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7cbf6bc784-rm6hn_4a2ec979-4e84-42ce-9299-8b9f5d88f001/horizon/0.log" Nov 26 06:53:05 crc kubenswrapper[4871]: I1126 06:53:05.064801 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj_48acdf72-822b-456b-b545-bd1499db855d/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:05 crc kubenswrapper[4871]: I1126 06:53:05.391427 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-99lcn_8fa0c629-09c7-43d9-964c-37320a475595/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:05 crc kubenswrapper[4871]: I1126 06:53:05.452448 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7cbf6bc784-rm6hn_4a2ec979-4e84-42ce-9299-8b9f5d88f001/horizon-log/0.log" Nov 26 06:53:05 crc kubenswrapper[4871]: I1126 06:53:05.529897 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29402281-4rm7g_3af4cf7b-408a-44b2-a5b3-2919f8f8ee68/keystone-cron/0.log" Nov 26 06:53:05 crc kubenswrapper[4871]: I1126 06:53:05.720904 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_e20fd17b-5b64-4272-9876-347ea057aa04/kube-state-metrics/3.log" Nov 26 06:53:05 crc kubenswrapper[4871]: I1126 06:53:05.751365 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_e20fd17b-5b64-4272-9876-347ea057aa04/kube-state-metrics/2.log" Nov 26 06:53:05 crc kubenswrapper[4871]: I1126 06:53:05.903114 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_keystone-847fdf8fc-mswx4_609a98bb-6812-4d0f-b408-023056fc5bca/keystone-api/0.log" Nov 26 06:53:05 crc kubenswrapper[4871]: I1126 06:53:05.947766 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn_95ebef76-794b-40b5-bf99-3604b66446f2/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:06 crc kubenswrapper[4871]: I1126 06:53:06.387888 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf_b6bbc102-0536-4833-8d96-a94360126601/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:06 crc kubenswrapper[4871]: I1126 06:53:06.396613 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5566bf8457-7qhhj_7dc2c737-ebec-4a5a-b06b-ffc355fb0a77/neutron-httpd/0.log" Nov 26 06:53:06 crc kubenswrapper[4871]: I1126 06:53:06.443486 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5566bf8457-7qhhj_7dc2c737-ebec-4a5a-b06b-ffc355fb0a77/neutron-api/0.log" Nov 26 06:53:06 crc kubenswrapper[4871]: I1126 06:53:06.897590 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_20a39a9e-9f10-45c6-be1c-9834e366658f/nova-cell0-conductor-conductor/0.log" Nov 26 06:53:07 crc kubenswrapper[4871]: I1126 06:53:07.260963 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_a703f1e3-b021-4fe0-9c3f-a5a90b96678e/nova-cell1-conductor-conductor/0.log" Nov 26 06:53:07 crc kubenswrapper[4871]: I1126 06:53:07.573710 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_94714c91-ac3e-4195-9c74-84e090b73a6e/nova-cell1-novncproxy-novncproxy/0.log" Nov 26 06:53:07 crc kubenswrapper[4871]: I1126 06:53:07.822935 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-wnbt7_4872fb15-1719-4e77-b0c1-7a2754ff7991/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:07 crc kubenswrapper[4871]: I1126 06:53:07.976486 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_b0049ce2-17f9-4372-a66e-7c03a3763460/nova-api-log/0.log" Nov 26 06:53:08 crc kubenswrapper[4871]: I1126 06:53:08.154117 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_b0049ce2-17f9-4372-a66e-7c03a3763460/nova-api-api/0.log" Nov 26 06:53:08 crc kubenswrapper[4871]: I1126 06:53:08.192818 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d8636384-aac2-4fd2-8f51-5cd6ca47c362/nova-metadata-log/0.log" Nov 26 06:53:08 crc kubenswrapper[4871]: I1126 06:53:08.444106 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1a6ce456-795f-4bf1-bab9-f5de7cfd7abe/mysql-bootstrap/0.log" Nov 26 06:53:08 crc kubenswrapper[4871]: I1126 06:53:08.506973 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41" Nov 26 06:53:08 crc kubenswrapper[4871]: E1126 06:53:08.507240 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:53:08 crc kubenswrapper[4871]: I1126 06:53:08.601094 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_27f01c4d-ea3c-4e99-ba8a-e31d9628307b/nova-scheduler-scheduler/0.log" Nov 26 06:53:08 crc kubenswrapper[4871]: I1126 06:53:08.692141 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1a6ce456-795f-4bf1-bab9-f5de7cfd7abe/galera/0.log" Nov 26 06:53:08 crc kubenswrapper[4871]: I1126 06:53:08.707141 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1a6ce456-795f-4bf1-bab9-f5de7cfd7abe/mysql-bootstrap/0.log" Nov 26 06:53:08 crc kubenswrapper[4871]: I1126 06:53:08.964292 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_fef4681d-3f18-4ed5-b251-92f53274dacd/mysql-bootstrap/0.log" Nov 26 06:53:09 crc kubenswrapper[4871]: I1126 06:53:09.124646 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_fef4681d-3f18-4ed5-b251-92f53274dacd/galera/0.log" Nov 26 06:53:09 crc kubenswrapper[4871]: I1126 06:53:09.144476 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_fef4681d-3f18-4ed5-b251-92f53274dacd/mysql-bootstrap/0.log" Nov 26 06:53:09 crc kubenswrapper[4871]: I1126 06:53:09.361509 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_7218b9c9-2508-46eb-8942-4c22b0c706cf/openstackclient/0.log" Nov 26 06:53:09 crc kubenswrapper[4871]: I1126 06:53:09.474399 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-m255d_de8a947b-6c51-4c33-b221-ea16d851bafb/ovn-controller/0.log" Nov 26 06:53:09 crc kubenswrapper[4871]: I1126 06:53:09.618031 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wmd2n_d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd/openstack-network-exporter/0.log" Nov 26 06:53:09 crc kubenswrapper[4871]: I1126 06:53:09.870908 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t9t82_9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e/ovsdb-server-init/0.log" Nov 26 06:53:10 crc kubenswrapper[4871]: I1126 06:53:10.043972 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d8636384-aac2-4fd2-8f51-5cd6ca47c362/nova-metadata-metadata/0.log" Nov 26 06:53:10 crc kubenswrapper[4871]: I1126 06:53:10.045071 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t9t82_9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e/ovsdb-server/0.log" Nov 26 06:53:10 crc kubenswrapper[4871]: I1126 06:53:10.059645 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t9t82_9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e/ovsdb-server-init/0.log" Nov 26 06:53:10 crc kubenswrapper[4871]: I1126 06:53:10.289334 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-896gl_8d747185-1d52-4102-be05-7f18ff179f3a/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:10 crc kubenswrapper[4871]: I1126 06:53:10.430900 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t9t82_9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e/ovs-vswitchd/0.log" Nov 26 06:53:10 crc kubenswrapper[4871]: I1126 06:53:10.543587 4871 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_ovn-northd-0_251bc2ce-32a0-4d94-843b-f7ac83e601f4/openstack-network-exporter/0.log" Nov 26 06:53:10 crc kubenswrapper[4871]: I1126 06:53:10.586965 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_251bc2ce-32a0-4d94-843b-f7ac83e601f4/ovn-northd/0.log" Nov 26 06:53:10 crc kubenswrapper[4871]: I1126 06:53:10.641569 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_df0ee863-8fbb-4a6e-86e3-8d56cf38da47/openstack-network-exporter/0.log" Nov 26 06:53:10 crc kubenswrapper[4871]: I1126 06:53:10.919415 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_df0ee863-8fbb-4a6e-86e3-8d56cf38da47/ovsdbserver-nb/0.log" Nov 26 06:53:10 crc kubenswrapper[4871]: I1126 06:53:10.952259 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_bd85545a-d991-4635-8d4b-2b81937e389f/openstack-network-exporter/0.log" Nov 26 06:53:11 crc kubenswrapper[4871]: I1126 06:53:11.062936 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_bd85545a-d991-4635-8d4b-2b81937e389f/ovsdbserver-sb/0.log" Nov 26 06:53:11 crc kubenswrapper[4871]: I1126 06:53:11.380381 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-665fcf99fb-m82r7_ae63dcae-cddc-4f63-acc0-4ec3254a6116/placement-api/0.log" Nov 26 06:53:11 crc kubenswrapper[4871]: I1126 06:53:11.447785 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-665fcf99fb-m82r7_ae63dcae-cddc-4f63-acc0-4ec3254a6116/placement-log/0.log" Nov 26 06:53:11 crc kubenswrapper[4871]: I1126 06:53:11.453002 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ea6e2b4-f88f-48c1-9044-5697a38a7abb/init-config-reloader/0.log" Nov 26 06:53:11 crc kubenswrapper[4871]: I1126 06:53:11.597664 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ea6e2b4-f88f-48c1-9044-5697a38a7abb/config-reloader/0.log" Nov 26 06:53:11 crc kubenswrapper[4871]: I1126 06:53:11.655436 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ea6e2b4-f88f-48c1-9044-5697a38a7abb/init-config-reloader/0.log" Nov 26 06:53:11 crc kubenswrapper[4871]: I1126 06:53:11.660970 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ea6e2b4-f88f-48c1-9044-5697a38a7abb/prometheus/0.log" Nov 26 06:53:11 crc kubenswrapper[4871]: I1126 06:53:11.670817 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ea6e2b4-f88f-48c1-9044-5697a38a7abb/thanos-sidecar/0.log" Nov 26 06:53:11 crc kubenswrapper[4871]: I1126 06:53:11.824906 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c02a9e9c-8083-4903-a64d-a140b1c9c143/setup-container/0.log" Nov 26 06:53:12 crc kubenswrapper[4871]: I1126 06:53:12.116986 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c02a9e9c-8083-4903-a64d-a140b1c9c143/setup-container/0.log" Nov 26 06:53:12 crc kubenswrapper[4871]: I1126 06:53:12.133080 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c02a9e9c-8083-4903-a64d-a140b1c9c143/rabbitmq/0.log" Nov 26 06:53:12 crc kubenswrapper[4871]: I1126 06:53:12.230066 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-notifications-server-0_7df95f1b-7a5b-445e-bb56-b17695a0bde9/setup-container/0.log" Nov 26 06:53:12 crc kubenswrapper[4871]: I1126 06:53:12.371959 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_7df95f1b-7a5b-445e-bb56-b17695a0bde9/setup-container/0.log" Nov 26 06:53:12 crc kubenswrapper[4871]: I1126 06:53:12.416404 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_7df95f1b-7a5b-445e-bb56-b17695a0bde9/rabbitmq/0.log" Nov 26 06:53:12 crc kubenswrapper[4871]: I1126 06:53:12.441500 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_f823aa11-fe59-4296-9a43-81bfc1275737/setup-container/0.log" Nov 26 06:53:12 crc kubenswrapper[4871]: I1126 06:53:12.667700 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_f823aa11-fe59-4296-9a43-81bfc1275737/rabbitmq/0.log" Nov 26 06:53:12 crc kubenswrapper[4871]: I1126 06:53:12.720940 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_f823aa11-fe59-4296-9a43-81bfc1275737/setup-container/0.log" Nov 26 06:53:12 crc kubenswrapper[4871]: I1126 06:53:12.724414 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m_d39ab741-a044-4ac6-9f2a-0949948cafdb/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:12 crc kubenswrapper[4871]: I1126 06:53:12.935181 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-vgjkj_4027b3b8-7a16-419f-8b16-52ff000c7268/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:13 crc kubenswrapper[4871]: I1126 06:53:13.005020 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7_2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:13 crc kubenswrapper[4871]: I1126 06:53:13.258536 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-cglzv_fca1e368-592f-4da5-b8f8-12bb29eca743/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:13 crc kubenswrapper[4871]: I1126 06:53:13.283581 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-d5dz5_0a3a838b-5101-4706-a5d9-50fc5797ba72/ssh-known-hosts-edpm-deployment/0.log" Nov 26 06:53:13 crc kubenswrapper[4871]: I1126 06:53:13.576358 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-qtnpf_bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d/swift-ring-rebalance/0.log" Nov 26 06:53:13 crc kubenswrapper[4871]: I1126 06:53:13.610662 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-78dd8485c9-fx6sv_fcca2594-c385-49cd-8354-7e4fcfab96c8/proxy-server/0.log" Nov 26 06:53:13 crc kubenswrapper[4871]: I1126 06:53:13.656772 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-78dd8485c9-fx6sv_fcca2594-c385-49cd-8354-7e4fcfab96c8/proxy-httpd/0.log" Nov 26 06:53:13 crc kubenswrapper[4871]: I1126 06:53:13.792876 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/account-auditor/0.log" Nov 26 06:53:13 crc kubenswrapper[4871]: I1126 06:53:13.862875 4871 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/account-reaper/0.log" Nov 26 06:53:13 crc kubenswrapper[4871]: I1126 06:53:13.925177 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/account-replicator/0.log" Nov 26 06:53:13 crc kubenswrapper[4871]: I1126 06:53:13.986258 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/account-server/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.031457 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/container-auditor/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.096457 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/container-replicator/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.111024 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/container-server/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.247125 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/container-updater/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.253808 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/object-auditor/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.306289 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/object-expirer/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.388352 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/object-replicator/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.436355 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/object-updater/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.483421 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/object-server/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.501565 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/rsync/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.575972 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/swift-recon-cron/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.695720 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-8mffj_bfc1b363-fb5b-4872-bf7f-215dc9c617b5/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.823327 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_d4d1d560-ed1f-4b35-bde2-53c83e6ddabc/tempest-tests-tempest-tests-runner/0.log" Nov 26 06:53:14 crc kubenswrapper[4871]: I1126 06:53:14.947319 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_69802496-881e-4259-a45b-a75b1434b79d/test-operator-logs-container/0.log" Nov 26 06:53:15 crc kubenswrapper[4871]: I1126 06:53:15.091627 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk_a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 06:53:15 crc kubenswrapper[4871]: I1126 06:53:15.092227 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_2757b1a6-7b8f-4008-8a08-96985496ec1a/memcached/0.log" Nov 26 06:53:15 crc kubenswrapper[4871]: I1126 06:53:15.789956 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_e939bb2f-dadb-4353-8845-f31c42b87a75/watcher-applier/0.log" Nov 26 06:53:16 crc kubenswrapper[4871]: I1126 06:53:16.497087 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_154bc562-d8d8-4608-8973-66b427a4f98f/watcher-api-log/0.log" Nov 26 06:53:18 crc kubenswrapper[4871]: I1126 06:53:18.403284 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_de5b1e93-a28e-405b-8ab4-a1bc50922b2e/watcher-decision-engine/0.log" Nov 26 06:53:19 crc kubenswrapper[4871]: I1126 06:53:19.161291 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_154bc562-d8d8-4608-8973-66b427a4f98f/watcher-api/0.log" Nov 26 06:53:22 crc kubenswrapper[4871]: I1126 06:53:22.513834 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41" Nov 26 06:53:22 crc kubenswrapper[4871]: E1126 06:53:22.514580 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:53:35 crc kubenswrapper[4871]: I1126 06:53:35.507747 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41" Nov 26 06:53:35 crc kubenswrapper[4871]: E1126 06:53:35.508492 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 06:53:42 crc kubenswrapper[4871]: I1126 06:53:42.576991 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/util/0.log" Nov 26 06:53:42 crc kubenswrapper[4871]: I1126 06:53:42.759425 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/util/0.log" Nov 26 06:53:42 crc kubenswrapper[4871]: I1126 06:53:42.775331 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/pull/0.log" Nov 26 06:53:42 crc kubenswrapper[4871]: I1126 06:53:42.791023 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/pull/0.log" Nov 26 06:53:42 crc kubenswrapper[4871]: I1126 06:53:42.940131 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/util/0.log" Nov 26 06:53:42 crc kubenswrapper[4871]: I1126 06:53:42.958918 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/extract/0.log" Nov 26 06:53:42 crc kubenswrapper[4871]: I1126 06:53:42.981132 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/pull/0.log" Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.099251 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-bdpn4_8c65e9f4-e3de-4bce-851a-f85c1036daa7/kube-rbac-proxy/0.log" Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.131492 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-bdpn4_8c65e9f4-e3de-4bce-851a-f85c1036daa7/manager/2.log" Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.171898 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-bdpn4_8c65e9f4-e3de-4bce-851a-f85c1036daa7/manager/1.log" Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.265606 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-wmwwk_ea13fc75-b3f0-48d3-9d86-5262df2957eb/kube-rbac-proxy/0.log" Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.334919 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-wmwwk_ea13fc75-b3f0-48d3-9d86-5262df2957eb/manager/3.log" Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.406064 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-wmwwk_ea13fc75-b3f0-48d3-9d86-5262df2957eb/manager/2.log" Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.441423 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-tsz49_70168336-54b1-481f-b6a0-d565be07d353/kube-rbac-proxy/0.log" Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.530792 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-tsz49_70168336-54b1-481f-b6a0-d565be07d353/manager/3.log" Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.576630 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-tsz49_70168336-54b1-481f-b6a0-d565be07d353/manager/2.log" Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.655006 
4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-czv5j_94ce6277-5176-415b-9f4d-847a73c93723/kube-rbac-proxy/0.log"
Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.736324 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-czv5j_94ce6277-5176-415b-9f4d-847a73c93723/manager/3.log"
Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.768439 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-czv5j_94ce6277-5176-415b-9f4d-847a73c93723/manager/2.log"
Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.823977 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-5kslm_9253bdc4-d16f-42eb-8704-0965e99dfe47/kube-rbac-proxy/0.log"
Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.913233 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-5kslm_9253bdc4-d16f-42eb-8704-0965e99dfe47/manager/3.log"
Nov 26 06:53:43 crc kubenswrapper[4871]: I1126 06:53:43.949486 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-5kslm_9253bdc4-d16f-42eb-8704-0965e99dfe47/manager/2.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.046371 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-clm5v_4659b831-32eb-4da2-97f3-f654a299605e/kube-rbac-proxy/0.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.080294 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-clm5v_4659b831-32eb-4da2-97f3-f654a299605e/manager/2.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.147401 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-clm5v_4659b831-32eb-4da2-97f3-f654a299605e/manager/1.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.230112 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x5hqw_06b4e3ae-765b-41c4-9334-4e33c2dc305f/kube-rbac-proxy/0.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.234889 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x5hqw_06b4e3ae-765b-41c4-9334-4e33c2dc305f/manager/3.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.353076 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x5hqw_06b4e3ae-765b-41c4-9334-4e33c2dc305f/manager/2.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.416289 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-lzsqj_51410db5-d309-4625-8f36-02cf8f0ba419/kube-rbac-proxy/0.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.450952 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-lzsqj_51410db5-d309-4625-8f36-02cf8f0ba419/manager/3.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.531247 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-lzsqj_51410db5-d309-4625-8f36-02cf8f0ba419/manager/2.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.614046 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-4gvxx_32cd59dd-1a82-4fce-81b1-ebc8f75f1e93/kube-rbac-proxy/0.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.657916 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-4gvxx_32cd59dd-1a82-4fce-81b1-ebc8f75f1e93/manager/3.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.701938 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-4gvxx_32cd59dd-1a82-4fce-81b1-ebc8f75f1e93/manager/2.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.808424 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-jvztg_6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c/kube-rbac-proxy/0.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.856588 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-jvztg_6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c/manager/3.log"
Nov 26 06:53:44 crc kubenswrapper[4871]: I1126 06:53:44.900005 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-jvztg_6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c/manager/2.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.008223 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-9xghq_2c7b5f25-e4ef-4abd-ba84-61b98f194ddd/kube-rbac-proxy/0.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.027930 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-9xghq_2c7b5f25-e4ef-4abd-ba84-61b98f194ddd/manager/3.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.074812 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-9xghq_2c7b5f25-e4ef-4abd-ba84-61b98f194ddd/manager/2.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.205281 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-6lpnj_19a75285-dcb7-4f34-b79c-613c96d555de/kube-rbac-proxy/0.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.207827 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-6lpnj_19a75285-dcb7-4f34-b79c-613c96d555de/manager/3.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.239953 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-6lpnj_19a75285-dcb7-4f34-b79c-613c96d555de/manager/2.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.359260 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rlr55_f68377a4-dee0-404b-988a-4f0673466e62/kube-rbac-proxy/0.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.403173 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rlr55_f68377a4-dee0-404b-988a-4f0673466e62/manager/2.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.453649 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rlr55_f68377a4-dee0-404b-988a-4f0673466e62/manager/1.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.580690 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-9lvtk_33ba2b4e-6239-43c0-a694-6495b7ae2ba3/kube-rbac-proxy/0.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.588277 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-9lvtk_33ba2b4e-6239-43c0-a694-6495b7ae2ba3/manager/3.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.635398 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-9lvtk_33ba2b4e-6239-43c0-a694-6495b7ae2ba3/manager/2.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.734592 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg_6b5541da-9198-4f49-998b-1bfd982089d1/manager/1.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.823364 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg_6b5541da-9198-4f49-998b-1bfd982089d1/manager/0.log"
Nov 26 06:53:45 crc kubenswrapper[4871]: I1126 06:53:45.839182 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg_6b5541da-9198-4f49-998b-1bfd982089d1/kube-rbac-proxy/0.log"
Nov 26 06:53:46 crc kubenswrapper[4871]: I1126 06:53:46.033684 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-56868586f6-2v8hx_6d7ff4ed-503b-4184-8633-47598150b7f0/manager/2.log"
Nov 26 06:53:46 crc kubenswrapper[4871]: I1126 06:53:46.240393 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-56868586f6-2v8hx_6d7ff4ed-503b-4184-8633-47598150b7f0/manager/3.log"
Nov 26 06:53:46 crc kubenswrapper[4871]: I1126 06:53:46.507062 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:53:46 crc kubenswrapper[4871]: E1126 06:53:46.507492 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:53:46 crc kubenswrapper[4871]: I1126 06:53:46.754347 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5675dd9766-bp9px_d78961c7-c9ff-4550-bf75-add0fcef53fe/operator/1.log"
Nov 26 06:53:46 crc kubenswrapper[4871]: I1126 06:53:46.764800 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="1a6ce456-795f-4bf1-bab9-f5de7cfd7abe" containerName="galera" probeResult="failure" output="command timed out"
Nov 26 06:53:46 crc kubenswrapper[4871]: I1126 06:53:46.904073 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-pwvh2_87895915-b98b-423d-b00c-9dd92656f1a8/registry-server/0.log"
Nov 26 06:53:46 crc kubenswrapper[4871]: I1126 06:53:46.981776 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-shgb6_6ccd73b2-dbfd-4cd6-845c-a61af4f20f96/kube-rbac-proxy/0.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.012056 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5675dd9766-bp9px_d78961c7-c9ff-4550-bf75-add0fcef53fe/operator/0.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.104448 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-shgb6_6ccd73b2-dbfd-4cd6-845c-a61af4f20f96/manager/3.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.184270 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-shgb6_6ccd73b2-dbfd-4cd6-845c-a61af4f20f96/manager/2.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.240563 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-dxbwn_1cc75505-b927-488b-8a16-4fda9a1c2dca/kube-rbac-proxy/0.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.301371 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-dxbwn_1cc75505-b927-488b-8a16-4fda9a1c2dca/manager/3.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.342691 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-dxbwn_1cc75505-b927-488b-8a16-4fda9a1c2dca/manager/2.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.403633 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-6c6pc_0b2406e7-8b16-45e1-b726-645d22421af5/operator/3.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.478926 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-6c6pc_0b2406e7-8b16-45e1-b726-645d22421af5/operator/2.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.544327 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-skx5k_4b0778b1-b974-4ce6-bac4-59920ab67dd7/kube-rbac-proxy/0.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.546585 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-skx5k_4b0778b1-b974-4ce6-bac4-59920ab67dd7/manager/3.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.595843 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-skx5k_4b0778b1-b974-4ce6-bac4-59920ab67dd7/manager/2.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.692464 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-6kccm_974fe30e-68b5-42bb-9940-a2000ab315f8/kube-rbac-proxy/0.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.718952 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-6kccm_974fe30e-68b5-42bb-9940-a2000ab315f8/manager/2.log"
Nov 26 06:53:47 crc kubenswrapper[4871]: I1126 06:53:47.744471 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-6kccm_974fe30e-68b5-42bb-9940-a2000ab315f8/manager/3.log"
Nov 26 06:53:48 crc kubenswrapper[4871]: I1126 06:53:48.752065 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-jj87z_1b4fb0bb-1050-4bda-acf4-c3efafc79e4a/kube-rbac-proxy/0.log"
Nov 26 06:53:48 crc kubenswrapper[4871]: I1126 06:53:48.761991 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-jj87z_1b4fb0bb-1050-4bda-acf4-c3efafc79e4a/manager/1.log"
Nov 26 06:53:48 crc kubenswrapper[4871]: I1126 06:53:48.825232 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-jj87z_1b4fb0bb-1050-4bda-acf4-c3efafc79e4a/manager/0.log"
Nov 26 06:53:48 crc kubenswrapper[4871]: I1126 06:53:48.838117 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-v95x7_8d32351e-c0cc-4c2a-89b2-a79b61cf632e/kube-rbac-proxy/0.log"
Nov 26 06:53:48 crc kubenswrapper[4871]: I1126 06:53:48.933321 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-v95x7_8d32351e-c0cc-4c2a-89b2-a79b61cf632e/manager/2.log"
Nov 26 06:53:48 crc kubenswrapper[4871]: I1126 06:53:48.942999 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-v95x7_8d32351e-c0cc-4c2a-89b2-a79b61cf632e/manager/3.log"
Nov 26 06:53:59 crc kubenswrapper[4871]: I1126 06:53:59.508673 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:53:59 crc kubenswrapper[4871]: E1126 06:53:59.509642 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:54:07 crc kubenswrapper[4871]: I1126 06:54:07.842211 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-2pb6b_dd4302fa-1a28-4718-b14c-f85e45519916/control-plane-machine-set-operator/0.log"
Nov 26 06:54:07 crc kubenswrapper[4871]: I1126 06:54:07.857175 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wg5vb_4edc5fd4-3610-4fa0-bf22-5ee6a41f6589/kube-rbac-proxy/0.log"
Nov 26 06:54:08 crc kubenswrapper[4871]: I1126 06:54:08.013781 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wg5vb_4edc5fd4-3610-4fa0-bf22-5ee6a41f6589/machine-api-operator/0.log"
Nov 26 06:54:11 crc kubenswrapper[4871]: I1126 06:54:11.507716 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:54:11 crc kubenswrapper[4871]: E1126 06:54:11.508846 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 06:54:21 crc kubenswrapper[4871]: I1126 06:54:21.217333 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-gkprb_c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc/cert-manager-controller/1.log"
Nov 26 06:54:21 crc kubenswrapper[4871]: I1126 06:54:21.283243 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-gkprb_c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc/cert-manager-controller/0.log"
Nov 26 06:54:21 crc kubenswrapper[4871]: I1126 06:54:21.395312 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-v7fsr_250180c0-d204-44e0-83b1-64259ea3bd68/cert-manager-cainjector/1.log"
Nov 26 06:54:21 crc kubenswrapper[4871]: I1126 06:54:21.469382 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-v7fsr_250180c0-d204-44e0-83b1-64259ea3bd68/cert-manager-cainjector/0.log"
Nov 26 06:54:21 crc kubenswrapper[4871]: I1126 06:54:21.558837 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-2v767_6a758ba2-2916-440d-9a57-149111e0ff4c/cert-manager-webhook/0.log"
Nov 26 06:54:24 crc kubenswrapper[4871]: I1126 06:54:24.507878 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:54:25 crc kubenswrapper[4871]: I1126 06:54:25.483285 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"8c8e6c6e65605e9250462f55057cd081512df064b1cb341cc927f8d5cb10ef03"}
Nov 26 06:54:34 crc kubenswrapper[4871]: I1126 06:54:34.306352 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-kgf8h_bacf9337-da95-4df5-9f49-a9e6c46ac060/nmstate-console-plugin/0.log"
Nov 26 06:54:34 crc kubenswrapper[4871]: I1126 06:54:34.507238 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-w8pzw_f24be1df-be1a-4389-a3d5-7842b91f18b4/nmstate-handler/0.log"
Nov 26 06:54:34 crc kubenswrapper[4871]: I1126 06:54:34.572316 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-z92tv_d6cb9226-08bd-44d7-97b7-ac75848ef5bd/kube-rbac-proxy/0.log"
Nov 26 06:54:34 crc kubenswrapper[4871]: I1126 06:54:34.581794 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-z92tv_d6cb9226-08bd-44d7-97b7-ac75848ef5bd/nmstate-metrics/0.log"
Nov 26 06:54:34 crc kubenswrapper[4871]: I1126 06:54:34.743499 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-xm9jn_4b9d5c2d-8d95-4b86-86e4-6e425a8c6814/nmstate-operator/0.log"
Nov 26 06:54:34 crc kubenswrapper[4871]: I1126 06:54:34.831617 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-ngv76_85b6422a-f943-4ced-8695-3d7f52f5f145/nmstate-webhook/0.log"
Nov 26 06:54:51 crc kubenswrapper[4871]: I1126 06:54:51.341053 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-n9ldw_df243ac7-b567-4159-8103-103df0831280/kube-rbac-proxy/0.log"
Nov 26 06:54:51 crc kubenswrapper[4871]: I1126 06:54:51.520480 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-n9ldw_df243ac7-b567-4159-8103-103df0831280/controller/0.log"
Nov 26 06:54:52 crc kubenswrapper[4871]: I1126 06:54:52.190209 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-frr-files/0.log"
Nov 26 06:54:52 crc kubenswrapper[4871]: I1126 06:54:52.335594 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-frr-files/0.log"
Nov 26 06:54:52 crc kubenswrapper[4871]: I1126 06:54:52.370170 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-reloader/0.log"
Nov 26 06:54:52 crc kubenswrapper[4871]: I1126 06:54:52.394158 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-reloader/0.log"
Nov 26 06:54:52 crc kubenswrapper[4871]: I1126 06:54:52.401372 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-metrics/0.log"
Nov 26 06:54:52 crc kubenswrapper[4871]: I1126 06:54:52.828055 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-metrics/0.log"
Nov 26 06:54:52 crc kubenswrapper[4871]: I1126 06:54:52.828206 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-frr-files/0.log"
Nov 26 06:54:52 crc kubenswrapper[4871]: I1126 06:54:52.876506 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-reloader/0.log"
Nov 26 06:54:52 crc kubenswrapper[4871]: I1126 06:54:52.906303 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-metrics/0.log"
Nov 26 06:54:53 crc kubenswrapper[4871]: I1126 06:54:53.059737 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-reloader/0.log"
Nov 26 06:54:53 crc kubenswrapper[4871]: I1126 06:54:53.069274 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-metrics/0.log"
Nov 26 06:54:53 crc kubenswrapper[4871]: I1126 06:54:53.087860 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/controller/0.log"
Nov 26 06:54:53 crc kubenswrapper[4871]: I1126 06:54:53.094799 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-frr-files/0.log"
Nov 26 06:54:53 crc kubenswrapper[4871]: I1126 06:54:53.267146 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/frr-metrics/0.log"
Nov 26 06:54:53 crc kubenswrapper[4871]: I1126 06:54:53.289699 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/kube-rbac-proxy/0.log"
Nov 26 06:54:53 crc kubenswrapper[4871]: I1126 06:54:53.324284 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/kube-rbac-proxy-frr/0.log"
Nov 26 06:54:53 crc kubenswrapper[4871]: I1126 06:54:53.508356 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/reloader/0.log"
Nov 26 06:54:53 crc kubenswrapper[4871]: I1126 06:54:53.605235 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-nnxbg_91a86765-1b7c-445b-8930-dc06e96fc752/frr-k8s-webhook-server/0.log"
Nov 26 06:54:53 crc kubenswrapper[4871]: I1126 06:54:53.867346 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-645b9949f7-48k8g_0f2d5628-2ad3-400c-bc77-b0251683a83a/manager/2.log"
Nov 26 06:54:53 crc kubenswrapper[4871]: I1126 06:54:53.977017 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-645b9949f7-48k8g_0f2d5628-2ad3-400c-bc77-b0251683a83a/manager/3.log"
Nov 26 06:54:54 crc kubenswrapper[4871]: I1126 06:54:54.114678 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-69c6746fd5-pkb65_35333648-4e74-4c66-803e-091d7d5673ca/webhook-server/0.log"
Nov 26 06:54:54 crc kubenswrapper[4871]: I1126 06:54:54.300185 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2nt4b_e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b/kube-rbac-proxy/0.log"
Nov 26 06:54:54 crc kubenswrapper[4871]: I1126 06:54:54.930865 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2nt4b_e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b/speaker/0.log"
Nov 26 06:54:54 crc kubenswrapper[4871]: I1126 06:54:54.985954 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/frr/0.log"
Nov 26 06:55:08 crc kubenswrapper[4871]: I1126 06:55:08.488842 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/util/0.log"
Nov 26 06:55:08 crc kubenswrapper[4871]: I1126 06:55:08.670177 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/pull/0.log"
Nov 26 06:55:08 crc kubenswrapper[4871]: I1126 06:55:08.685966 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/pull/0.log"
Nov 26 06:55:08 crc kubenswrapper[4871]: I1126 06:55:08.694668 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/util/0.log"
Nov 26 06:55:08 crc kubenswrapper[4871]: I1126 06:55:08.820297 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/util/0.log"
Nov 26 06:55:08 crc kubenswrapper[4871]: I1126 06:55:08.838112 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/pull/0.log"
Nov 26 06:55:08 crc kubenswrapper[4871]: I1126 06:55:08.838927 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/extract/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.006706 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/util/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.194499 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/util/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.213902 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/pull/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.242630 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/pull/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.378385 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/pull/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.388271 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/util/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.392617 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/extract/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.566721 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-utilities/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.759989 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-utilities/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.784805 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-content/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.788227 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-content/0.log"
Nov 26 06:55:09 crc kubenswrapper[4871]: I1126 06:55:09.981122 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-content/0.log"
Nov 26 06:55:10 crc kubenswrapper[4871]: I1126 06:55:10.049221 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-utilities/0.log"
Nov 26 06:55:10 crc kubenswrapper[4871]: I1126 06:55:10.245559 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-utilities/0.log"
Nov 26 06:55:10 crc kubenswrapper[4871]: I1126 06:55:10.844126 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/registry-server/0.log"
Nov 26 06:55:11 crc kubenswrapper[4871]: I1126 06:55:11.221575 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-utilities/0.log"
Nov 26 06:55:11 crc kubenswrapper[4871]: I1126 06:55:11.222503 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-content/0.log"
Nov 26 06:55:11 crc kubenswrapper[4871]: I1126 06:55:11.247925 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-content/0.log"
Nov 26 06:55:11 crc kubenswrapper[4871]: I1126 06:55:11.390242 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-utilities/0.log"
Nov 26 06:55:11 crc kubenswrapper[4871]: I1126 06:55:11.397955 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-content/0.log"
Nov 26 06:55:11 crc kubenswrapper[4871]: I1126 06:55:11.601382 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/util/0.log"
Nov 26 06:55:11 crc kubenswrapper[4871]: I1126 06:55:11.832043 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/pull/0.log"
Nov 26 06:55:11 crc kubenswrapper[4871]: I1126 06:55:11.872759 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/pull/0.log"
Nov 26 06:55:11 crc kubenswrapper[4871]: I1126 06:55:11.916042 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/util/0.log"
Nov 26 06:55:12 crc kubenswrapper[4871]: I1126 06:55:12.074341 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/util/0.log"
Nov 26 06:55:12 crc kubenswrapper[4871]: I1126 06:55:12.142348 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/extract/0.log"
Nov 26 06:55:12 crc kubenswrapper[4871]: I1126 06:55:12.145678 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/pull/0.log"
Nov 26 06:55:12 crc kubenswrapper[4871]: I1126 06:55:12.337571 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/registry-server/0.log"
Nov 26 06:55:12 crc kubenswrapper[4871]: I1126 06:55:12.951453 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qks66_65ad1a09-cc57-45f2-9a13-2d83b8b8221c/marketplace-operator/1.log"
Nov 26 06:55:12 crc kubenswrapper[4871]: I1126 06:55:12.966266 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-utilities/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.016869 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qks66_65ad1a09-cc57-45f2-9a13-2d83b8b8221c/marketplace-operator/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.178636 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-content/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.181024 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-content/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.187590 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-utilities/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.346099 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-utilities/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.350129 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-content/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.446159 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-utilities/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.533990 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/registry-server/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.616401 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-content/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.655809 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-utilities/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.669993 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-content/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.797248 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-content/0.log"
Nov 26 06:55:13 crc kubenswrapper[4871]: I1126 06:55:13.829926 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-utilities/0.log"
Nov 26 06:55:14 crc kubenswrapper[4871]: I1126 06:55:14.534948 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/registry-server/0.log"
Nov 26 06:55:27 crc kubenswrapper[4871]: I1126 06:55:27.559011 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-bmfw7_b9220e8d-267e-4462-b6ea-094a0f724eb3/prometheus-operator/0.log"
Nov 26 06:55:27 crc kubenswrapper[4871]: I1126 06:55:27.689554 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj_995a4906-508d-4285-b40c-5b14fd9d7b98/prometheus-operator-admission-webhook/0.log"
Nov 26 06:55:27 crc kubenswrapper[4871]: I1126 06:55:27.759648 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5_3cd1c1e8-5430-4209-a0e2-3176d0ebb70a/prometheus-operator-admission-webhook/0.log"
Nov 26 06:55:28 crc kubenswrapper[4871]: I1126 06:55:28.060280 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-l8rk7_2e5f535a-ead4-47e3-a477-20cf74b0828a/operator/0.log"
Nov 26 06:55:28 crc kubenswrapper[4871]: I1126 06:55:28.098859 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-9f8vw_b0a308b8-6586-4d48-b431-ce0c6f46a23e/perses-operator/0.log"
Nov 26 06:55:40 crc kubenswrapper[4871]: E1126 06:55:40.645735 4871 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.44:50758->38.102.83.44:38809: read tcp 38.102.83.44:50758->38.102.83.44:38809: read: connection reset by peer
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.604180 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cghqb"]
Nov 26 06:55:43 crc kubenswrapper[4871]: E1126 06:55:43.605839 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6598c03e-810f-428b-8f60-ee7fc1187c78" containerName="container-00"
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.605920 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="6598c03e-810f-428b-8f60-ee7fc1187c78" containerName="container-00"
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.612405 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="6598c03e-810f-428b-8f60-ee7fc1187c78" containerName="container-00"
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.633281 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.639945 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cghqb"]
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.819666 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6plf2\" (UniqueName: \"kubernetes.io/projected/2c10adeb-9619-4879-b32f-57208b2b30fa-kube-api-access-6plf2\") pod \"redhat-operators-cghqb\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") " pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.819810 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-catalog-content\") pod \"redhat-operators-cghqb\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") " pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.819868 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-utilities\") pod \"redhat-operators-cghqb\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") " pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.921395 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-catalog-content\") pod \"redhat-operators-cghqb\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") " pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.921478 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-utilities\") pod \"redhat-operators-cghqb\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") " pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.921629 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6plf2\" (UniqueName: \"kubernetes.io/projected/2c10adeb-9619-4879-b32f-57208b2b30fa-kube-api-access-6plf2\") pod \"redhat-operators-cghqb\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") " pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.921985 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-catalog-content\") pod \"redhat-operators-cghqb\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") " pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:43 crc kubenswrapper[4871]: I1126 06:55:43.922049 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-utilities\") pod \"redhat-operators-cghqb\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") " pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:44 crc kubenswrapper[4871]: I1126 06:55:44.007600 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6plf2\" (UniqueName: \"kubernetes.io/projected/2c10adeb-9619-4879-b32f-57208b2b30fa-kube-api-access-6plf2\") pod \"redhat-operators-cghqb\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") " pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:44 crc kubenswrapper[4871]: I1126 06:55:44.278833 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:44 crc kubenswrapper[4871]: I1126 06:55:44.942827 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cghqb"]
Nov 26 06:55:45 crc kubenswrapper[4871]: I1126 06:55:45.248035 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cghqb" event={"ID":"2c10adeb-9619-4879-b32f-57208b2b30fa","Type":"ContainerStarted","Data":"bb3cc5a76a85fd37a494dd8e37bddd467178b4c1223946cf0260d7ebc5b66c80"}
Nov 26 06:55:46 crc kubenswrapper[4871]: I1126 06:55:46.259402 4871 generic.go:334] "Generic (PLEG): container finished" podID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerID="3e14590ab4604c290d0902d96ec249d08117556bf88b9d17e10bf78062b2f9a0" exitCode=0
Nov 26 06:55:46 crc kubenswrapper[4871]: I1126 06:55:46.259491 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cghqb" event={"ID":"2c10adeb-9619-4879-b32f-57208b2b30fa","Type":"ContainerDied","Data":"3e14590ab4604c290d0902d96ec249d08117556bf88b9d17e10bf78062b2f9a0"}
Nov 26 06:55:48 crc kubenswrapper[4871]: I1126 06:55:48.286069 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cghqb" event={"ID":"2c10adeb-9619-4879-b32f-57208b2b30fa","Type":"ContainerStarted","Data":"e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c"}
Nov 26 06:55:51 crc kubenswrapper[4871]: I1126 06:55:51.327812 4871 generic.go:334] "Generic (PLEG): container finished" podID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerID="e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c" exitCode=0
Nov 26 06:55:51 crc kubenswrapper[4871]: I1126 06:55:51.328165 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cghqb" event={"ID":"2c10adeb-9619-4879-b32f-57208b2b30fa","Type":"ContainerDied","Data":"e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c"}
Nov 26 06:55:51 crc kubenswrapper[4871]: I1126 06:55:51.330441 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 06:55:52 crc kubenswrapper[4871]: I1126 06:55:52.344240 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cghqb" event={"ID":"2c10adeb-9619-4879-b32f-57208b2b30fa","Type":"ContainerStarted","Data":"addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d"}
Nov 26 06:55:52 crc kubenswrapper[4871]: I1126 06:55:52.363873 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cghqb" podStartSLOduration=3.8358726069999998 podStartE2EDuration="9.363855383s" podCreationTimestamp="2025-11-26 06:55:43 +0000 UTC" firstStartedPulling="2025-11-26 06:55:46.264046465 +0000 UTC m=+5404.447098051" lastFinishedPulling="2025-11-26 06:55:51.792029241 +0000 UTC m=+5409.975080827" observedRunningTime="2025-11-26 06:55:52.361040824 +0000 UTC m=+5410.544092440" watchObservedRunningTime="2025-11-26 06:55:52.363855383 +0000 UTC m=+5410.546906969"
Nov 26 06:55:54 crc kubenswrapper[4871]: I1126 06:55:54.279691 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:54 crc kubenswrapper[4871]: I1126 06:55:54.281715 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:55:54 crc kubenswrapper[4871]: E1126 06:55:54.732293 4871 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.44:56622->38.102.83.44:38809: read tcp 38.102.83.44:56622->38.102.83.44:38809: read: connection reset by peer
Nov 26 06:55:55 crc kubenswrapper[4871]: I1126 06:55:55.334775 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-cghqb" podUID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerName="registry-server" probeResult="failure" output=<
Nov 26 06:55:55 crc kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s
Nov 26 06:55:55 crc kubenswrapper[4871]: >
Nov 26 06:56:04 crc kubenswrapper[4871]: I1126 06:56:04.325107 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:56:04 crc kubenswrapper[4871]: I1126 06:56:04.389836 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:56:04 crc kubenswrapper[4871]: I1126 06:56:04.563659 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cghqb"]
Nov 26 06:56:05 crc kubenswrapper[4871]: I1126 06:56:05.505584 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cghqb" podUID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerName="registry-server" containerID="cri-o://addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d" gracePeriod=2
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.098854 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.231250 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-utilities\") pod \"2c10adeb-9619-4879-b32f-57208b2b30fa\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") "
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.231807 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6plf2\" (UniqueName: \"kubernetes.io/projected/2c10adeb-9619-4879-b32f-57208b2b30fa-kube-api-access-6plf2\") pod \"2c10adeb-9619-4879-b32f-57208b2b30fa\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") "
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.231898 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-catalog-content\") pod \"2c10adeb-9619-4879-b32f-57208b2b30fa\" (UID: \"2c10adeb-9619-4879-b32f-57208b2b30fa\") "
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.232013 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-utilities" (OuterVolumeSpecName: "utilities") pod "2c10adeb-9619-4879-b32f-57208b2b30fa" (UID: "2c10adeb-9619-4879-b32f-57208b2b30fa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.232425 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-utilities\") on node \"crc\" DevicePath \"\""
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.244137 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c10adeb-9619-4879-b32f-57208b2b30fa-kube-api-access-6plf2" (OuterVolumeSpecName: "kube-api-access-6plf2") pod "2c10adeb-9619-4879-b32f-57208b2b30fa" (UID: "2c10adeb-9619-4879-b32f-57208b2b30fa"). InnerVolumeSpecName "kube-api-access-6plf2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.334011 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6plf2\" (UniqueName: \"kubernetes.io/projected/2c10adeb-9619-4879-b32f-57208b2b30fa-kube-api-access-6plf2\") on node \"crc\" DevicePath \"\""
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.347776 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2c10adeb-9619-4879-b32f-57208b2b30fa" (UID: "2c10adeb-9619-4879-b32f-57208b2b30fa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.436804 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c10adeb-9619-4879-b32f-57208b2b30fa-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.524392 4871 generic.go:334] "Generic (PLEG): container finished" podID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerID="addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d" exitCode=0
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.524470 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cghqb"
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.544095 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cghqb" event={"ID":"2c10adeb-9619-4879-b32f-57208b2b30fa","Type":"ContainerDied","Data":"addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d"}
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.544148 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cghqb" event={"ID":"2c10adeb-9619-4879-b32f-57208b2b30fa","Type":"ContainerDied","Data":"bb3cc5a76a85fd37a494dd8e37bddd467178b4c1223946cf0260d7ebc5b66c80"}
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.544170 4871 scope.go:117] "RemoveContainer" containerID="addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d"
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.578612 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cghqb"]
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.589877 4871 scope.go:117] "RemoveContainer" containerID="e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c"
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.598703 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cghqb"]
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.625402 4871 scope.go:117] "RemoveContainer" containerID="3e14590ab4604c290d0902d96ec249d08117556bf88b9d17e10bf78062b2f9a0"
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.693746 4871 scope.go:117] "RemoveContainer" containerID="addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d"
Nov 26 06:56:06 crc kubenswrapper[4871]: E1126 06:56:06.694240 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d\": container with ID starting with addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d not found: ID does not exist" containerID="addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d"
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.694283 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d"} err="failed to get container status \"addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d\": rpc error: code = NotFound desc = could not find container \"addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d\": container with ID starting with addc84753cd32cb0553555c0909b3e537b564c5628ccd27ce7c460e5714f2c1d not found: ID does not exist"
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.694311 4871 scope.go:117] "RemoveContainer" containerID="e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c"
Nov 26 06:56:06 crc kubenswrapper[4871]: E1126 06:56:06.694597 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c\": container with ID starting with e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c not found: ID does not exist" containerID="e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c"
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.694632 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c"} err="failed to get container status \"e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c\": rpc error: code = NotFound desc = could not find container \"e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c\": container with ID starting with e5f219aae7fcfdbadc3be63ff8e4869c0ed3664b6a9c79fc68a538daae54635c not found: ID does not exist"
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.694652 4871 scope.go:117] "RemoveContainer" containerID="3e14590ab4604c290d0902d96ec249d08117556bf88b9d17e10bf78062b2f9a0"
Nov 26 06:56:06 crc kubenswrapper[4871]: E1126 06:56:06.694954 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e14590ab4604c290d0902d96ec249d08117556bf88b9d17e10bf78062b2f9a0\": container with ID starting with 3e14590ab4604c290d0902d96ec249d08117556bf88b9d17e10bf78062b2f9a0 not found: ID does not exist" containerID="3e14590ab4604c290d0902d96ec249d08117556bf88b9d17e10bf78062b2f9a0"
Nov 26 06:56:06 crc kubenswrapper[4871]: I1126 06:56:06.694988 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e14590ab4604c290d0902d96ec249d08117556bf88b9d17e10bf78062b2f9a0"} err="failed to get container status \"3e14590ab4604c290d0902d96ec249d08117556bf88b9d17e10bf78062b2f9a0\": rpc error: code = NotFound desc = could not find container \"3e14590ab4604c290d0902d96ec249d08117556bf88b9d17e10bf78062b2f9a0\": container with ID starting with 3e14590ab4604c290d0902d96ec249d08117556bf88b9d17e10bf78062b2f9a0 not found: ID does not exist"
Nov 26 06:56:08 crc kubenswrapper[4871]: I1126 06:56:08.530483 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c10adeb-9619-4879-b32f-57208b2b30fa" path="/var/lib/kubelet/pods/2c10adeb-9619-4879-b32f-57208b2b30fa/volumes"
Nov 26 06:56:53 crc kubenswrapper[4871]: I1126 06:56:53.615199 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:56:53 crc kubenswrapper[4871]: I1126 06:56:53.615831 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:57:23 crc kubenswrapper[4871]: I1126 06:57:23.615259 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:57:23 crc kubenswrapper[4871]: I1126 06:57:23.616022 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:57:27 crc kubenswrapper[4871]: I1126 06:57:27.463173 4871 generic.go:334] "Generic (PLEG): container finished" podID="c7797f7c-aee3-49b3-accf-2072decd1ed1" containerID="ce54abd0c2bf5403069ccaa99941137d4f4502ce8d652e9e69223f714471e1cb" exitCode=0
Nov 26 06:57:27 crc kubenswrapper[4871]: I1126 06:57:27.463325 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-sl4tf/must-gather-wnbsz" event={"ID":"c7797f7c-aee3-49b3-accf-2072decd1ed1","Type":"ContainerDied","Data":"ce54abd0c2bf5403069ccaa99941137d4f4502ce8d652e9e69223f714471e1cb"}
Nov 26 06:57:27 crc kubenswrapper[4871]: I1126 06:57:27.465149 4871 scope.go:117] "RemoveContainer" containerID="ce54abd0c2bf5403069ccaa99941137d4f4502ce8d652e9e69223f714471e1cb"
Nov 26 06:57:28 crc kubenswrapper[4871]: I1126 06:57:28.111276 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sl4tf_must-gather-wnbsz_c7797f7c-aee3-49b3-accf-2072decd1ed1/gather/0.log"
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.046636 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-sl4tf/must-gather-wnbsz"]
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.047613 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-sl4tf/must-gather-wnbsz" podUID="c7797f7c-aee3-49b3-accf-2072decd1ed1" containerName="copy" containerID="cri-o://894a2408e86126a76b6b2935a9031f4afd993a26282a630ced7e525ffba2092e" gracePeriod=2
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.059003 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-sl4tf/must-gather-wnbsz"]
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.586250 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sl4tf_must-gather-wnbsz_c7797f7c-aee3-49b3-accf-2072decd1ed1/copy/0.log"
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.593597 4871 generic.go:334] "Generic (PLEG): container finished" podID="c7797f7c-aee3-49b3-accf-2072decd1ed1" containerID="894a2408e86126a76b6b2935a9031f4afd993a26282a630ced7e525ffba2092e" exitCode=143
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.593669 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="352d9bc4e8b5f1f2321bdfeaa6ab5f7cce73ccba48069475656e0624ee89ba63"
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.678732 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-sl4tf_must-gather-wnbsz_c7797f7c-aee3-49b3-accf-2072decd1ed1/copy/0.log"
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.679163 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/must-gather-wnbsz"
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.758123 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8xzm\" (UniqueName: \"kubernetes.io/projected/c7797f7c-aee3-49b3-accf-2072decd1ed1-kube-api-access-s8xzm\") pod \"c7797f7c-aee3-49b3-accf-2072decd1ed1\" (UID: \"c7797f7c-aee3-49b3-accf-2072decd1ed1\") "
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.758245 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c7797f7c-aee3-49b3-accf-2072decd1ed1-must-gather-output\") pod \"c7797f7c-aee3-49b3-accf-2072decd1ed1\" (UID: \"c7797f7c-aee3-49b3-accf-2072decd1ed1\") "
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.762728 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7797f7c-aee3-49b3-accf-2072decd1ed1-kube-api-access-s8xzm" (OuterVolumeSpecName: "kube-api-access-s8xzm") pod "c7797f7c-aee3-49b3-accf-2072decd1ed1" (UID: "c7797f7c-aee3-49b3-accf-2072decd1ed1"). InnerVolumeSpecName "kube-api-access-s8xzm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.861108 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8xzm\" (UniqueName: \"kubernetes.io/projected/c7797f7c-aee3-49b3-accf-2072decd1ed1-kube-api-access-s8xzm\") on node \"crc\" DevicePath \"\""
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.914566 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7797f7c-aee3-49b3-accf-2072decd1ed1-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "c7797f7c-aee3-49b3-accf-2072decd1ed1" (UID: "c7797f7c-aee3-49b3-accf-2072decd1ed1"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 26 06:57:37 crc kubenswrapper[4871]: I1126 06:57:37.962340 4871 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c7797f7c-aee3-49b3-accf-2072decd1ed1-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 26 06:57:38 crc kubenswrapper[4871]: I1126 06:57:38.517576 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7797f7c-aee3-49b3-accf-2072decd1ed1" path="/var/lib/kubelet/pods/c7797f7c-aee3-49b3-accf-2072decd1ed1/volumes"
Nov 26 06:57:38 crc kubenswrapper[4871]: I1126 06:57:38.602283 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-sl4tf/must-gather-wnbsz"
Nov 26 06:57:53 crc kubenswrapper[4871]: I1126 06:57:53.614764 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:57:53 crc kubenswrapper[4871]: I1126 06:57:53.615375 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 06:57:53 crc kubenswrapper[4871]: I1126 06:57:53.615450 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2"
Nov 26 06:57:53 crc kubenswrapper[4871]: I1126 06:57:53.616576 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8c8e6c6e65605e9250462f55057cd081512df064b1cb341cc927f8d5cb10ef03"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 06:57:53 crc kubenswrapper[4871]: I1126 06:57:53.616676 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://8c8e6c6e65605e9250462f55057cd081512df064b1cb341cc927f8d5cb10ef03" gracePeriod=600
Nov 26 06:57:53 crc kubenswrapper[4871]: I1126 06:57:53.819201 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="8c8e6c6e65605e9250462f55057cd081512df064b1cb341cc927f8d5cb10ef03" exitCode=0
Nov 26 06:57:53 crc kubenswrapper[4871]: I1126 06:57:53.819246 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"8c8e6c6e65605e9250462f55057cd081512df064b1cb341cc927f8d5cb10ef03"}
Nov 26 06:57:53 crc kubenswrapper[4871]: I1126 06:57:53.819278 4871 scope.go:117] "RemoveContainer" containerID="a1a301503b3b56dd5d4ac6fdb468b7c9c05fa1ba445e035892e06c1805196f41"
Nov 26 06:57:54 crc kubenswrapper[4871]: I1126 06:57:54.831961 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947"}
Nov 26 06:58:24 crc kubenswrapper[4871]: I1126 06:58:24.891904 4871 scope.go:117] "RemoveContainer" containerID="894a2408e86126a76b6b2935a9031f4afd993a26282a630ced7e525ffba2092e"
Nov 26 06:58:24 crc kubenswrapper[4871]: I1126 06:58:24.919416 4871 scope.go:117] "RemoveContainer" containerID="ce54abd0c2bf5403069ccaa99941137d4f4502ce8d652e9e69223f714471e1cb"
Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.645848 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-khx88"]
Nov 26 06:59:02 crc kubenswrapper[4871]: E1126
06:59:02.648258 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7797f7c-aee3-49b3-accf-2072decd1ed1" containerName="copy" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.648290 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7797f7c-aee3-49b3-accf-2072decd1ed1" containerName="copy" Nov 26 06:59:02 crc kubenswrapper[4871]: E1126 06:59:02.648320 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerName="registry-server" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.648332 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerName="registry-server" Nov 26 06:59:02 crc kubenswrapper[4871]: E1126 06:59:02.648370 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerName="extract-content" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.648381 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerName="extract-content" Nov 26 06:59:02 crc kubenswrapper[4871]: E1126 06:59:02.648407 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerName="extract-utilities" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.648416 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerName="extract-utilities" Nov 26 06:59:02 crc kubenswrapper[4871]: E1126 06:59:02.648433 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7797f7c-aee3-49b3-accf-2072decd1ed1" containerName="gather" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.648442 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7797f7c-aee3-49b3-accf-2072decd1ed1" containerName="gather" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.648773 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7797f7c-aee3-49b3-accf-2072decd1ed1" containerName="gather" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.648799 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7797f7c-aee3-49b3-accf-2072decd1ed1" containerName="copy" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.648809 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c10adeb-9619-4879-b32f-57208b2b30fa" containerName="registry-server" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.650760 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.660696 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-khx88"] Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.754127 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-utilities\") pod \"community-operators-khx88\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.754319 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scmdl\" (UniqueName: \"kubernetes.io/projected/defe830d-3e44-44dd-b437-9803d0ff97c9-kube-api-access-scmdl\") pod \"community-operators-khx88\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.754362 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-catalog-content\") pod \"community-operators-khx88\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.833883 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hfq4m"] Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.837979 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.856453 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-utilities\") pod \"community-operators-khx88\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.856633 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scmdl\" (UniqueName: \"kubernetes.io/projected/defe830d-3e44-44dd-b437-9803d0ff97c9-kube-api-access-scmdl\") pod \"community-operators-khx88\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.856657 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-catalog-content\") pod \"community-operators-khx88\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.857063 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-catalog-content\") pod \"community-operators-khx88\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.857240 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hfq4m"] Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.857393 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-utilities\") pod \"community-operators-khx88\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.909480 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scmdl\" (UniqueName: \"kubernetes.io/projected/defe830d-3e44-44dd-b437-9803d0ff97c9-kube-api-access-scmdl\") pod \"community-operators-khx88\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.958831 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cz9hv\" (UniqueName: \"kubernetes.io/projected/a184c41f-2187-4039-a291-13344e56d5f7-kube-api-access-cz9hv\") pod \"certified-operators-hfq4m\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.959034 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-catalog-content\") pod \"certified-operators-hfq4m\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.959080 4871 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-utilities\") pod \"certified-operators-hfq4m\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:02 crc kubenswrapper[4871]: I1126 06:59:02.982570 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:03 crc kubenswrapper[4871]: I1126 06:59:03.061271 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cz9hv\" (UniqueName: \"kubernetes.io/projected/a184c41f-2187-4039-a291-13344e56d5f7-kube-api-access-cz9hv\") pod \"certified-operators-hfq4m\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:03 crc kubenswrapper[4871]: I1126 06:59:03.061432 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-catalog-content\") pod \"certified-operators-hfq4m\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:03 crc kubenswrapper[4871]: I1126 06:59:03.061472 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-utilities\") pod \"certified-operators-hfq4m\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:03 crc kubenswrapper[4871]: I1126 06:59:03.062080 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-utilities\") pod \"certified-operators-hfq4m\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:03 crc kubenswrapper[4871]: I1126 06:59:03.062698 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-catalog-content\") pod \"certified-operators-hfq4m\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:03 crc kubenswrapper[4871]: I1126 06:59:03.083345 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cz9hv\" (UniqueName: \"kubernetes.io/projected/a184c41f-2187-4039-a291-13344e56d5f7-kube-api-access-cz9hv\") pod \"certified-operators-hfq4m\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:03 crc kubenswrapper[4871]: I1126 06:59:03.156218 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:03 crc kubenswrapper[4871]: I1126 06:59:03.620325 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-khx88"] Nov 26 06:59:03 crc kubenswrapper[4871]: I1126 06:59:03.645950 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khx88" event={"ID":"defe830d-3e44-44dd-b437-9803d0ff97c9","Type":"ContainerStarted","Data":"2e9344b8ccc09a45e894bea25e7221b3d053bfcf5c7d35d8d1a79c8faa6ba316"} Nov 26 06:59:03 crc kubenswrapper[4871]: I1126 06:59:03.710911 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hfq4m"] Nov 26 06:59:03 crc kubenswrapper[4871]: W1126 06:59:03.722073 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda184c41f_2187_4039_a291_13344e56d5f7.slice/crio-9227c06d3ac633edd41ba39ca616dafe9a02f034bca5e835e290c2a3b9fda76c WatchSource:0}: Error finding container 9227c06d3ac633edd41ba39ca616dafe9a02f034bca5e835e290c2a3b9fda76c: Status 404 returned error can't find the container with id 9227c06d3ac633edd41ba39ca616dafe9a02f034bca5e835e290c2a3b9fda76c Nov 26 06:59:04 crc kubenswrapper[4871]: I1126 06:59:04.657073 4871 generic.go:334] "Generic (PLEG): container finished" podID="defe830d-3e44-44dd-b437-9803d0ff97c9" containerID="583cbfe033f6b95ba8f3853261c78e7535f414a59a5062c79bd788d39c521beb" exitCode=0 Nov 26 06:59:04 crc kubenswrapper[4871]: I1126 06:59:04.657281 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khx88" event={"ID":"defe830d-3e44-44dd-b437-9803d0ff97c9","Type":"ContainerDied","Data":"583cbfe033f6b95ba8f3853261c78e7535f414a59a5062c79bd788d39c521beb"} Nov 26 06:59:04 crc kubenswrapper[4871]: I1126 06:59:04.659320 4871 generic.go:334] "Generic (PLEG): container finished" podID="a184c41f-2187-4039-a291-13344e56d5f7" containerID="973c8a9eb1269ae167baa93807741c9b12c2e9df187cf496a020b91acb0e7a29" exitCode=0 Nov 26 06:59:04 crc kubenswrapper[4871]: I1126 06:59:04.659371 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfq4m" event={"ID":"a184c41f-2187-4039-a291-13344e56d5f7","Type":"ContainerDied","Data":"973c8a9eb1269ae167baa93807741c9b12c2e9df187cf496a020b91acb0e7a29"} Nov 26 06:59:04 crc kubenswrapper[4871]: I1126 06:59:04.659414 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfq4m" event={"ID":"a184c41f-2187-4039-a291-13344e56d5f7","Type":"ContainerStarted","Data":"9227c06d3ac633edd41ba39ca616dafe9a02f034bca5e835e290c2a3b9fda76c"} Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.635365 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9jsjs"] Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.638056 4871 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.646186 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9jsjs"] Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.674465 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khx88" event={"ID":"defe830d-3e44-44dd-b437-9803d0ff97c9","Type":"ContainerStarted","Data":"acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126"} Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.677223 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfq4m" event={"ID":"a184c41f-2187-4039-a291-13344e56d5f7","Type":"ContainerStarted","Data":"b0dd0d835da940493d53cbcb301d34811d935f31fe9ba93e651b95690561e29f"} Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.712615 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-catalog-content\") pod \"redhat-marketplace-9jsjs\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.712834 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-utilities\") pod \"redhat-marketplace-9jsjs\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.712859 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftjsf\" (UniqueName: \"kubernetes.io/projected/c2d3670c-3a24-4ecf-864b-0fe9618d434a-kube-api-access-ftjsf\") pod \"redhat-marketplace-9jsjs\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.814832 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftjsf\" (UniqueName: \"kubernetes.io/projected/c2d3670c-3a24-4ecf-864b-0fe9618d434a-kube-api-access-ftjsf\") pod \"redhat-marketplace-9jsjs\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.814876 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-utilities\") pod \"redhat-marketplace-9jsjs\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.814966 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-catalog-content\") pod \"redhat-marketplace-9jsjs\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.815432 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-catalog-content\") pod \"redhat-marketplace-9jsjs\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.815639 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-utilities\") pod \"redhat-marketplace-9jsjs\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.838107 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftjsf\" (UniqueName: \"kubernetes.io/projected/c2d3670c-3a24-4ecf-864b-0fe9618d434a-kube-api-access-ftjsf\") pod \"redhat-marketplace-9jsjs\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:05 crc kubenswrapper[4871]: I1126 06:59:05.954417 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:06 crc kubenswrapper[4871]: W1126 06:59:06.417698 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2d3670c_3a24_4ecf_864b_0fe9618d434a.slice/crio-7cba8e621e23db4b46ce2ba0d53a82e8db5ebc9482908ed08d85bf7cb19ff652 WatchSource:0}: Error finding container 7cba8e621e23db4b46ce2ba0d53a82e8db5ebc9482908ed08d85bf7cb19ff652: Status 404 returned error can't find the container with id 7cba8e621e23db4b46ce2ba0d53a82e8db5ebc9482908ed08d85bf7cb19ff652 Nov 26 06:59:06 crc kubenswrapper[4871]: I1126 06:59:06.421627 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9jsjs"] Nov 26 06:59:06 crc kubenswrapper[4871]: I1126 06:59:06.690000 4871 generic.go:334] "Generic (PLEG): container finished" podID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" containerID="558bd32e680e183c9a769d843f7e5f622e95e5a5731f6e7398aa19507f615ee3" exitCode=0 Nov 26 06:59:06 crc kubenswrapper[4871]: I1126 06:59:06.690229 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jsjs" event={"ID":"c2d3670c-3a24-4ecf-864b-0fe9618d434a","Type":"ContainerDied","Data":"558bd32e680e183c9a769d843f7e5f622e95e5a5731f6e7398aa19507f615ee3"} Nov 26 06:59:06 crc kubenswrapper[4871]: I1126 06:59:06.690393 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jsjs" event={"ID":"c2d3670c-3a24-4ecf-864b-0fe9618d434a","Type":"ContainerStarted","Data":"7cba8e621e23db4b46ce2ba0d53a82e8db5ebc9482908ed08d85bf7cb19ff652"} Nov 26 06:59:07 crc kubenswrapper[4871]: I1126 06:59:07.705019 4871 generic.go:334] "Generic (PLEG): container finished" podID="a184c41f-2187-4039-a291-13344e56d5f7" containerID="b0dd0d835da940493d53cbcb301d34811d935f31fe9ba93e651b95690561e29f" exitCode=0 Nov 26 06:59:07 crc kubenswrapper[4871]: I1126 06:59:07.705099 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfq4m" event={"ID":"a184c41f-2187-4039-a291-13344e56d5f7","Type":"ContainerDied","Data":"b0dd0d835da940493d53cbcb301d34811d935f31fe9ba93e651b95690561e29f"} Nov 26 06:59:07 crc kubenswrapper[4871]: I1126 06:59:07.715741 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jsjs" 
event={"ID":"c2d3670c-3a24-4ecf-864b-0fe9618d434a","Type":"ContainerStarted","Data":"9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31"} Nov 26 06:59:07 crc kubenswrapper[4871]: I1126 06:59:07.719015 4871 generic.go:334] "Generic (PLEG): container finished" podID="defe830d-3e44-44dd-b437-9803d0ff97c9" containerID="acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126" exitCode=0 Nov 26 06:59:07 crc kubenswrapper[4871]: I1126 06:59:07.719074 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khx88" event={"ID":"defe830d-3e44-44dd-b437-9803d0ff97c9","Type":"ContainerDied","Data":"acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126"} Nov 26 06:59:08 crc kubenswrapper[4871]: I1126 06:59:08.729303 4871 generic.go:334] "Generic (PLEG): container finished" podID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" containerID="9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31" exitCode=0 Nov 26 06:59:08 crc kubenswrapper[4871]: I1126 06:59:08.729386 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jsjs" event={"ID":"c2d3670c-3a24-4ecf-864b-0fe9618d434a","Type":"ContainerDied","Data":"9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31"} Nov 26 06:59:08 crc kubenswrapper[4871]: I1126 06:59:08.732597 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khx88" event={"ID":"defe830d-3e44-44dd-b437-9803d0ff97c9","Type":"ContainerStarted","Data":"14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518"} Nov 26 06:59:08 crc kubenswrapper[4871]: I1126 06:59:08.736668 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfq4m" event={"ID":"a184c41f-2187-4039-a291-13344e56d5f7","Type":"ContainerStarted","Data":"f6bd289466a5861a9d5232561025480c9c8b8996f68b175007de17d60502d79e"} Nov 26 06:59:08 crc kubenswrapper[4871]: I1126 06:59:08.782919 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hfq4m" podStartSLOduration=3.317751329 podStartE2EDuration="6.78289152s" podCreationTimestamp="2025-11-26 06:59:02 +0000 UTC" firstStartedPulling="2025-11-26 06:59:04.661119427 +0000 UTC m=+5602.844171023" lastFinishedPulling="2025-11-26 06:59:08.126259628 +0000 UTC m=+5606.309311214" observedRunningTime="2025-11-26 06:59:08.774709957 +0000 UTC m=+5606.957761563" watchObservedRunningTime="2025-11-26 06:59:08.78289152 +0000 UTC m=+5606.965943106" Nov 26 06:59:08 crc kubenswrapper[4871]: I1126 06:59:08.794853 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-khx88" podStartSLOduration=3.324033803 podStartE2EDuration="6.794829595s" podCreationTimestamp="2025-11-26 06:59:02 +0000 UTC" firstStartedPulling="2025-11-26 06:59:04.660413129 +0000 UTC m=+5602.843464735" lastFinishedPulling="2025-11-26 06:59:08.131208941 +0000 UTC m=+5606.314260527" observedRunningTime="2025-11-26 06:59:08.790303683 +0000 UTC m=+5606.973355269" watchObservedRunningTime="2025-11-26 06:59:08.794829595 +0000 UTC m=+5606.977881181" Nov 26 06:59:09 crc kubenswrapper[4871]: I1126 06:59:09.751035 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jsjs" event={"ID":"c2d3670c-3a24-4ecf-864b-0fe9618d434a","Type":"ContainerStarted","Data":"b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262"} Nov 26 
06:59:09 crc kubenswrapper[4871]: I1126 06:59:09.776485 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9jsjs" podStartSLOduration=2.3063729459999998 podStartE2EDuration="4.776467s" podCreationTimestamp="2025-11-26 06:59:05 +0000 UTC" firstStartedPulling="2025-11-26 06:59:06.695047075 +0000 UTC m=+5604.878098661" lastFinishedPulling="2025-11-26 06:59:09.165141109 +0000 UTC m=+5607.348192715" observedRunningTime="2025-11-26 06:59:09.774980443 +0000 UTC m=+5607.958032029" watchObservedRunningTime="2025-11-26 06:59:09.776467 +0000 UTC m=+5607.959518606" Nov 26 06:59:12 crc kubenswrapper[4871]: I1126 06:59:12.982876 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:12 crc kubenswrapper[4871]: I1126 06:59:12.983227 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:13 crc kubenswrapper[4871]: I1126 06:59:13.052443 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:13 crc kubenswrapper[4871]: I1126 06:59:13.157184 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:13 crc kubenswrapper[4871]: I1126 06:59:13.157297 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:13 crc kubenswrapper[4871]: I1126 06:59:13.212770 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:13 crc kubenswrapper[4871]: I1126 06:59:13.848398 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:13 crc kubenswrapper[4871]: I1126 06:59:13.866718 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:15 crc kubenswrapper[4871]: I1126 06:59:15.030359 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-khx88"] Nov 26 06:59:15 crc kubenswrapper[4871]: I1126 06:59:15.629180 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hfq4m"] Nov 26 06:59:15 crc kubenswrapper[4871]: I1126 06:59:15.815793 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-khx88" podUID="defe830d-3e44-44dd-b437-9803d0ff97c9" containerName="registry-server" containerID="cri-o://14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518" gracePeriod=2 Nov 26 06:59:15 crc kubenswrapper[4871]: I1126 06:59:15.955219 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:15 crc kubenswrapper[4871]: I1126 06:59:15.956333 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.065082 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.390684 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.523219 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scmdl\" (UniqueName: \"kubernetes.io/projected/defe830d-3e44-44dd-b437-9803d0ff97c9-kube-api-access-scmdl\") pod \"defe830d-3e44-44dd-b437-9803d0ff97c9\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.523323 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-utilities\") pod \"defe830d-3e44-44dd-b437-9803d0ff97c9\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.523410 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-catalog-content\") pod \"defe830d-3e44-44dd-b437-9803d0ff97c9\" (UID: \"defe830d-3e44-44dd-b437-9803d0ff97c9\") " Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.524066 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-utilities" (OuterVolumeSpecName: "utilities") pod "defe830d-3e44-44dd-b437-9803d0ff97c9" (UID: "defe830d-3e44-44dd-b437-9803d0ff97c9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.528002 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.561934 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/defe830d-3e44-44dd-b437-9803d0ff97c9-kube-api-access-scmdl" (OuterVolumeSpecName: "kube-api-access-scmdl") pod "defe830d-3e44-44dd-b437-9803d0ff97c9" (UID: "defe830d-3e44-44dd-b437-9803d0ff97c9"). InnerVolumeSpecName "kube-api-access-scmdl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.632710 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scmdl\" (UniqueName: \"kubernetes.io/projected/defe830d-3e44-44dd-b437-9803d0ff97c9-kube-api-access-scmdl\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.824932 4871 generic.go:334] "Generic (PLEG): container finished" podID="defe830d-3e44-44dd-b437-9803d0ff97c9" containerID="14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518" exitCode=0 Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.825652 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khx88" event={"ID":"defe830d-3e44-44dd-b437-9803d0ff97c9","Type":"ContainerDied","Data":"14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518"} Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.825736 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khx88" event={"ID":"defe830d-3e44-44dd-b437-9803d0ff97c9","Type":"ContainerDied","Data":"2e9344b8ccc09a45e894bea25e7221b3d053bfcf5c7d35d8d1a79c8faa6ba316"} Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.825768 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-khx88" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.825773 4871 scope.go:117] "RemoveContainer" containerID="14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.825926 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hfq4m" podUID="a184c41f-2187-4039-a291-13344e56d5f7" containerName="registry-server" containerID="cri-o://f6bd289466a5861a9d5232561025480c9c8b8996f68b175007de17d60502d79e" gracePeriod=2 Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.850311 4871 scope.go:117] "RemoveContainer" containerID="acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.872725 4871 scope.go:117] "RemoveContainer" containerID="583cbfe033f6b95ba8f3853261c78e7535f414a59a5062c79bd788d39c521beb" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.891810 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.932881 4871 scope.go:117] "RemoveContainer" containerID="14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518" Nov 26 06:59:16 crc kubenswrapper[4871]: E1126 06:59:16.933309 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518\": container with ID starting with 14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518 not found: ID does not exist" containerID="14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.933341 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518"} err="failed to get container status \"14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518\": rpc error: code = NotFound desc = could 
not find container \"14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518\": container with ID starting with 14304dc0503d174305e74ac2cc87f7849033ec59f8fad7061456c6f3f725a518 not found: ID does not exist" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.933363 4871 scope.go:117] "RemoveContainer" containerID="acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126" Nov 26 06:59:16 crc kubenswrapper[4871]: E1126 06:59:16.933653 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126\": container with ID starting with acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126 not found: ID does not exist" containerID="acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.933687 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126"} err="failed to get container status \"acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126\": rpc error: code = NotFound desc = could not find container \"acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126\": container with ID starting with acba67e9fca7f07ae799ab1dad9a46be9078f495514554a70713456e4876b126 not found: ID does not exist" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.933704 4871 scope.go:117] "RemoveContainer" containerID="583cbfe033f6b95ba8f3853261c78e7535f414a59a5062c79bd788d39c521beb" Nov 26 06:59:16 crc kubenswrapper[4871]: E1126 06:59:16.933890 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"583cbfe033f6b95ba8f3853261c78e7535f414a59a5062c79bd788d39c521beb\": container with ID starting with 583cbfe033f6b95ba8f3853261c78e7535f414a59a5062c79bd788d39c521beb not found: ID does not exist" containerID="583cbfe033f6b95ba8f3853261c78e7535f414a59a5062c79bd788d39c521beb" Nov 26 06:59:16 crc kubenswrapper[4871]: I1126 06:59:16.933916 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"583cbfe033f6b95ba8f3853261c78e7535f414a59a5062c79bd788d39c521beb"} err="failed to get container status \"583cbfe033f6b95ba8f3853261c78e7535f414a59a5062c79bd788d39c521beb\": rpc error: code = NotFound desc = could not find container \"583cbfe033f6b95ba8f3853261c78e7535f414a59a5062c79bd788d39c521beb\": container with ID starting with 583cbfe033f6b95ba8f3853261c78e7535f414a59a5062c79bd788d39c521beb not found: ID does not exist" Nov 26 06:59:17 crc kubenswrapper[4871]: I1126 06:59:17.543022 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "defe830d-3e44-44dd-b437-9803d0ff97c9" (UID: "defe830d-3e44-44dd-b437-9803d0ff97c9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:59:17 crc kubenswrapper[4871]: I1126 06:59:17.621666 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/defe830d-3e44-44dd-b437-9803d0ff97c9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:17 crc kubenswrapper[4871]: I1126 06:59:17.774316 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-khx88"] Nov 26 06:59:17 crc kubenswrapper[4871]: I1126 06:59:17.787760 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-khx88"] Nov 26 06:59:17 crc kubenswrapper[4871]: I1126 06:59:17.839985 4871 generic.go:334] "Generic (PLEG): container finished" podID="a184c41f-2187-4039-a291-13344e56d5f7" containerID="f6bd289466a5861a9d5232561025480c9c8b8996f68b175007de17d60502d79e" exitCode=0 Nov 26 06:59:17 crc kubenswrapper[4871]: I1126 06:59:17.840097 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfq4m" event={"ID":"a184c41f-2187-4039-a291-13344e56d5f7","Type":"ContainerDied","Data":"f6bd289466a5861a9d5232561025480c9c8b8996f68b175007de17d60502d79e"} Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.379410 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.438868 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cz9hv\" (UniqueName: \"kubernetes.io/projected/a184c41f-2187-4039-a291-13344e56d5f7-kube-api-access-cz9hv\") pod \"a184c41f-2187-4039-a291-13344e56d5f7\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.439002 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-catalog-content\") pod \"a184c41f-2187-4039-a291-13344e56d5f7\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.439054 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-utilities\") pod \"a184c41f-2187-4039-a291-13344e56d5f7\" (UID: \"a184c41f-2187-4039-a291-13344e56d5f7\") " Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.440360 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-utilities" (OuterVolumeSpecName: "utilities") pod "a184c41f-2187-4039-a291-13344e56d5f7" (UID: "a184c41f-2187-4039-a291-13344e56d5f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.444923 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a184c41f-2187-4039-a291-13344e56d5f7-kube-api-access-cz9hv" (OuterVolumeSpecName: "kube-api-access-cz9hv") pod "a184c41f-2187-4039-a291-13344e56d5f7" (UID: "a184c41f-2187-4039-a291-13344e56d5f7"). InnerVolumeSpecName "kube-api-access-cz9hv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.510445 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a184c41f-2187-4039-a291-13344e56d5f7" (UID: "a184c41f-2187-4039-a291-13344e56d5f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.523136 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="defe830d-3e44-44dd-b437-9803d0ff97c9" path="/var/lib/kubelet/pods/defe830d-3e44-44dd-b437-9803d0ff97c9/volumes" Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.540515 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cz9hv\" (UniqueName: \"kubernetes.io/projected/a184c41f-2187-4039-a291-13344e56d5f7-kube-api-access-cz9hv\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.540564 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.540573 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a184c41f-2187-4039-a291-13344e56d5f7-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.860690 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hfq4m" Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.860816 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfq4m" event={"ID":"a184c41f-2187-4039-a291-13344e56d5f7","Type":"ContainerDied","Data":"9227c06d3ac633edd41ba39ca616dafe9a02f034bca5e835e290c2a3b9fda76c"} Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.860913 4871 scope.go:117] "RemoveContainer" containerID="f6bd289466a5861a9d5232561025480c9c8b8996f68b175007de17d60502d79e" Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.900766 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hfq4m"] Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.904297 4871 scope.go:117] "RemoveContainer" containerID="b0dd0d835da940493d53cbcb301d34811d935f31fe9ba93e651b95690561e29f" Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.918259 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hfq4m"] Nov 26 06:59:18 crc kubenswrapper[4871]: I1126 06:59:18.946920 4871 scope.go:117] "RemoveContainer" containerID="973c8a9eb1269ae167baa93807741c9b12c2e9df187cf496a020b91acb0e7a29" Nov 26 06:59:19 crc kubenswrapper[4871]: I1126 06:59:19.428024 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9jsjs"] Nov 26 06:59:19 crc kubenswrapper[4871]: I1126 06:59:19.874882 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9jsjs" podUID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" containerName="registry-server" containerID="cri-o://b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262" gracePeriod=2 Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 
06:59:20.331467 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.477882 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftjsf\" (UniqueName: \"kubernetes.io/projected/c2d3670c-3a24-4ecf-864b-0fe9618d434a-kube-api-access-ftjsf\") pod \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.478261 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-utilities\") pod \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.478359 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-catalog-content\") pod \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\" (UID: \"c2d3670c-3a24-4ecf-864b-0fe9618d434a\") " Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.481004 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-utilities" (OuterVolumeSpecName: "utilities") pod "c2d3670c-3a24-4ecf-864b-0fe9618d434a" (UID: "c2d3670c-3a24-4ecf-864b-0fe9618d434a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.483890 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2d3670c-3a24-4ecf-864b-0fe9618d434a-kube-api-access-ftjsf" (OuterVolumeSpecName: "kube-api-access-ftjsf") pod "c2d3670c-3a24-4ecf-864b-0fe9618d434a" (UID: "c2d3670c-3a24-4ecf-864b-0fe9618d434a"). InnerVolumeSpecName "kube-api-access-ftjsf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.494591 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c2d3670c-3a24-4ecf-864b-0fe9618d434a" (UID: "c2d3670c-3a24-4ecf-864b-0fe9618d434a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.525577 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a184c41f-2187-4039-a291-13344e56d5f7" path="/var/lib/kubelet/pods/a184c41f-2187-4039-a291-13344e56d5f7/volumes" Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.581344 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.581379 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftjsf\" (UniqueName: \"kubernetes.io/projected/c2d3670c-3a24-4ecf-864b-0fe9618d434a-kube-api-access-ftjsf\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.581391 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2d3670c-3a24-4ecf-864b-0fe9618d434a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.907111 4871 generic.go:334] "Generic (PLEG): container finished" podID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" containerID="b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262" exitCode=0 Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.907169 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jsjs" event={"ID":"c2d3670c-3a24-4ecf-864b-0fe9618d434a","Type":"ContainerDied","Data":"b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262"} Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.907204 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jsjs" event={"ID":"c2d3670c-3a24-4ecf-864b-0fe9618d434a","Type":"ContainerDied","Data":"7cba8e621e23db4b46ce2ba0d53a82e8db5ebc9482908ed08d85bf7cb19ff652"} Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.907240 4871 scope.go:117] "RemoveContainer" containerID="b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262" Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.907271 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9jsjs" Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.942031 4871 scope.go:117] "RemoveContainer" containerID="9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31" Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.957626 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9jsjs"] Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.966978 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9jsjs"] Nov 26 06:59:20 crc kubenswrapper[4871]: I1126 06:59:20.977211 4871 scope.go:117] "RemoveContainer" containerID="558bd32e680e183c9a769d843f7e5f622e95e5a5731f6e7398aa19507f615ee3" Nov 26 06:59:21 crc kubenswrapper[4871]: I1126 06:59:21.037951 4871 scope.go:117] "RemoveContainer" containerID="b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262" Nov 26 06:59:21 crc kubenswrapper[4871]: E1126 06:59:21.043956 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262\": container with ID starting with b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262 not found: ID does not exist" containerID="b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262" Nov 26 06:59:21 crc kubenswrapper[4871]: I1126 06:59:21.043990 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262"} err="failed to get container status \"b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262\": rpc error: code = NotFound desc = could not find container \"b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262\": container with ID starting with b506a7a0d6e88b7529b6fd576ec6068a1304b551b3084274ec3d0affb282b262 not found: ID does not exist" Nov 26 06:59:21 crc kubenswrapper[4871]: I1126 06:59:21.044012 4871 scope.go:117] "RemoveContainer" containerID="9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31" Nov 26 06:59:21 crc kubenswrapper[4871]: E1126 06:59:21.044350 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31\": container with ID starting with 9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31 not found: ID does not exist" containerID="9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31" Nov 26 06:59:21 crc kubenswrapper[4871]: I1126 06:59:21.044370 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31"} err="failed to get container status \"9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31\": rpc error: code = NotFound desc = could not find container \"9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31\": container with ID starting with 9bdd8c88980b88f4723542ec2641b8219b7cf96767ba4303cbe69e2a08106d31 not found: ID does not exist" Nov 26 06:59:21 crc kubenswrapper[4871]: I1126 06:59:21.044386 4871 scope.go:117] "RemoveContainer" containerID="558bd32e680e183c9a769d843f7e5f622e95e5a5731f6e7398aa19507f615ee3" Nov 26 06:59:21 crc kubenswrapper[4871]: E1126 06:59:21.044800 4871 log.go:32] "ContainerStatus from runtime service 
Nov 26 06:59:21 crc kubenswrapper[4871]: I1126 06:59:21.044849 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"558bd32e680e183c9a769d843f7e5f622e95e5a5731f6e7398aa19507f615ee3"} err="failed to get container status \"558bd32e680e183c9a769d843f7e5f622e95e5a5731f6e7398aa19507f615ee3\": rpc error: code = NotFound desc = could not find container \"558bd32e680e183c9a769d843f7e5f622e95e5a5731f6e7398aa19507f615ee3\": container with ID starting with 558bd32e680e183c9a769d843f7e5f622e95e5a5731f6e7398aa19507f615ee3 not found: ID does not exist"
Nov 26 06:59:22 crc kubenswrapper[4871]: I1126 06:59:22.518039 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" path="/var/lib/kubelet/pods/c2d3670c-3a24-4ecf-864b-0fe9618d434a/volumes"
Nov 26 06:59:25 crc kubenswrapper[4871]: I1126 06:59:25.001816 4871 scope.go:117] "RemoveContainer" containerID="cfaf619691a907279ef5f65726978a6840920f77a2fb7b9867c3918ed012febf"
Nov 26 06:59:53 crc kubenswrapper[4871]: I1126 06:59:53.614842 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 06:59:53 crc kubenswrapper[4871]: I1126 06:59:53.615392 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.172208 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"]
Nov 26 07:00:00 crc kubenswrapper[4871]: E1126 07:00:00.174392 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="defe830d-3e44-44dd-b437-9803d0ff97c9" containerName="extract-utilities"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.174504 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="defe830d-3e44-44dd-b437-9803d0ff97c9" containerName="extract-utilities"
Nov 26 07:00:00 crc kubenswrapper[4871]: E1126 07:00:00.174610 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a184c41f-2187-4039-a291-13344e56d5f7" containerName="registry-server"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.174671 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a184c41f-2187-4039-a291-13344e56d5f7" containerName="registry-server"
Nov 26 07:00:00 crc kubenswrapper[4871]: E1126 07:00:00.174748 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" containerName="registry-server"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.174818 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" containerName="registry-server"
Nov 26 07:00:00 crc kubenswrapper[4871]: E1126 07:00:00.175292 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" containerName="extract-utilities"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.175461 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" containerName="extract-utilities"
Nov 26 07:00:00 crc kubenswrapper[4871]: E1126 07:00:00.175563 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="defe830d-3e44-44dd-b437-9803d0ff97c9" containerName="extract-content"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.175626 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="defe830d-3e44-44dd-b437-9803d0ff97c9" containerName="extract-content"
Nov 26 07:00:00 crc kubenswrapper[4871]: E1126 07:00:00.175692 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="defe830d-3e44-44dd-b437-9803d0ff97c9" containerName="registry-server"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.175748 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="defe830d-3e44-44dd-b437-9803d0ff97c9" containerName="registry-server"
Nov 26 07:00:00 crc kubenswrapper[4871]: E1126 07:00:00.175804 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a184c41f-2187-4039-a291-13344e56d5f7" containerName="extract-utilities"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.175861 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a184c41f-2187-4039-a291-13344e56d5f7" containerName="extract-utilities"
Nov 26 07:00:00 crc kubenswrapper[4871]: E1126 07:00:00.175929 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" containerName="extract-content"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.175987 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" containerName="extract-content"
Nov 26 07:00:00 crc kubenswrapper[4871]: E1126 07:00:00.176060 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a184c41f-2187-4039-a291-13344e56d5f7" containerName="extract-content"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.176119 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="a184c41f-2187-4039-a291-13344e56d5f7" containerName="extract-content"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.176645 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="defe830d-3e44-44dd-b437-9803d0ff97c9" containerName="registry-server"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.176759 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="a184c41f-2187-4039-a291-13344e56d5f7" containerName="registry-server"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.176846 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2d3670c-3a24-4ecf-864b-0fe9618d434a" containerName="registry-server"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.177854 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.180685 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.185056 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.186344 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"]
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.346334 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3343b437-ae99-41ef-a0ae-c0b141dfb798-secret-volume\") pod \"collect-profiles-29402340-7xmp4\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.346377 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnq26\" (UniqueName: \"kubernetes.io/projected/3343b437-ae99-41ef-a0ae-c0b141dfb798-kube-api-access-dnq26\") pod \"collect-profiles-29402340-7xmp4\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.346683 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3343b437-ae99-41ef-a0ae-c0b141dfb798-config-volume\") pod \"collect-profiles-29402340-7xmp4\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.448480 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3343b437-ae99-41ef-a0ae-c0b141dfb798-secret-volume\") pod \"collect-profiles-29402340-7xmp4\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.448567 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnq26\" (UniqueName: \"kubernetes.io/projected/3343b437-ae99-41ef-a0ae-c0b141dfb798-kube-api-access-dnq26\") pod \"collect-profiles-29402340-7xmp4\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.448701 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3343b437-ae99-41ef-a0ae-c0b141dfb798-config-volume\") pod \"collect-profiles-29402340-7xmp4\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.449776 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3343b437-ae99-41ef-a0ae-c0b141dfb798-config-volume\") pod \"collect-profiles-29402340-7xmp4\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.458490 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3343b437-ae99-41ef-a0ae-c0b141dfb798-secret-volume\") pod \"collect-profiles-29402340-7xmp4\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.478454 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnq26\" (UniqueName: \"kubernetes.io/projected/3343b437-ae99-41ef-a0ae-c0b141dfb798-kube-api-access-dnq26\") pod \"collect-profiles-29402340-7xmp4\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.508224 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:00 crc kubenswrapper[4871]: I1126 07:00:00.972172 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"]
Nov 26 07:00:01 crc kubenswrapper[4871]: I1126 07:00:01.389430 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4" event={"ID":"3343b437-ae99-41ef-a0ae-c0b141dfb798","Type":"ContainerStarted","Data":"eb25c267d929ee919472def6bd0732a9ca264bdb3bc19846c20bc23f5f13db91"}
Nov 26 07:00:01 crc kubenswrapper[4871]: I1126 07:00:01.389937 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4" event={"ID":"3343b437-ae99-41ef-a0ae-c0b141dfb798","Type":"ContainerStarted","Data":"02ae8ea07be8a8e2643290e3a1ec9b02be04d3d4c4b759023c0c0ad01c857bb4"}
Nov 26 07:00:01 crc kubenswrapper[4871]: I1126 07:00:01.418643 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4" podStartSLOduration=1.418610078 podStartE2EDuration="1.418610078s" podCreationTimestamp="2025-11-26 07:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:00:01.407947454 +0000 UTC m=+5659.590999050" watchObservedRunningTime="2025-11-26 07:00:01.418610078 +0000 UTC m=+5659.601661704"
Nov 26 07:00:02 crc kubenswrapper[4871]: I1126 07:00:02.400023 4871 generic.go:334] "Generic (PLEG): container finished" podID="3343b437-ae99-41ef-a0ae-c0b141dfb798" containerID="eb25c267d929ee919472def6bd0732a9ca264bdb3bc19846c20bc23f5f13db91" exitCode=0
Nov 26 07:00:02 crc kubenswrapper[4871]: I1126 07:00:02.400123 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4" event={"ID":"3343b437-ae99-41ef-a0ae-c0b141dfb798","Type":"ContainerDied","Data":"eb25c267d929ee919472def6bd0732a9ca264bdb3bc19846c20bc23f5f13db91"}
Nov 26 07:00:03 crc kubenswrapper[4871]: I1126 07:00:03.793226 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:03 crc kubenswrapper[4871]: I1126 07:00:03.932882 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3343b437-ae99-41ef-a0ae-c0b141dfb798-config-volume\") pod \"3343b437-ae99-41ef-a0ae-c0b141dfb798\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") "
Nov 26 07:00:03 crc kubenswrapper[4871]: I1126 07:00:03.933109 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnq26\" (UniqueName: \"kubernetes.io/projected/3343b437-ae99-41ef-a0ae-c0b141dfb798-kube-api-access-dnq26\") pod \"3343b437-ae99-41ef-a0ae-c0b141dfb798\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") "
Nov 26 07:00:03 crc kubenswrapper[4871]: I1126 07:00:03.933182 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3343b437-ae99-41ef-a0ae-c0b141dfb798-secret-volume\") pod \"3343b437-ae99-41ef-a0ae-c0b141dfb798\" (UID: \"3343b437-ae99-41ef-a0ae-c0b141dfb798\") "
Nov 26 07:00:03 crc kubenswrapper[4871]: I1126 07:00:03.933664 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3343b437-ae99-41ef-a0ae-c0b141dfb798-config-volume" (OuterVolumeSpecName: "config-volume") pod "3343b437-ae99-41ef-a0ae-c0b141dfb798" (UID: "3343b437-ae99-41ef-a0ae-c0b141dfb798"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 26 07:00:03 crc kubenswrapper[4871]: I1126 07:00:03.941697 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3343b437-ae99-41ef-a0ae-c0b141dfb798-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3343b437-ae99-41ef-a0ae-c0b141dfb798" (UID: "3343b437-ae99-41ef-a0ae-c0b141dfb798"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:00:03 crc kubenswrapper[4871]: I1126 07:00:03.943779 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3343b437-ae99-41ef-a0ae-c0b141dfb798-kube-api-access-dnq26" (OuterVolumeSpecName: "kube-api-access-dnq26") pod "3343b437-ae99-41ef-a0ae-c0b141dfb798" (UID: "3343b437-ae99-41ef-a0ae-c0b141dfb798"). InnerVolumeSpecName "kube-api-access-dnq26". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:00:04 crc kubenswrapper[4871]: I1126 07:00:04.035861 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnq26\" (UniqueName: \"kubernetes.io/projected/3343b437-ae99-41ef-a0ae-c0b141dfb798-kube-api-access-dnq26\") on node \"crc\" DevicePath \"\""
Nov 26 07:00:04 crc kubenswrapper[4871]: I1126 07:00:04.035894 4871 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3343b437-ae99-41ef-a0ae-c0b141dfb798-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 26 07:00:04 crc kubenswrapper[4871]: I1126 07:00:04.035906 4871 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3343b437-ae99-41ef-a0ae-c0b141dfb798-config-volume\") on node \"crc\" DevicePath \"\""
Nov 26 07:00:04 crc kubenswrapper[4871]: I1126 07:00:04.420675 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4" event={"ID":"3343b437-ae99-41ef-a0ae-c0b141dfb798","Type":"ContainerDied","Data":"02ae8ea07be8a8e2643290e3a1ec9b02be04d3d4c4b759023c0c0ad01c857bb4"}
Nov 26 07:00:04 crc kubenswrapper[4871]: I1126 07:00:04.421072 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02ae8ea07be8a8e2643290e3a1ec9b02be04d3d4c4b759023c0c0ad01c857bb4"
Nov 26 07:00:04 crc kubenswrapper[4871]: I1126 07:00:04.420724 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402340-7xmp4"
Nov 26 07:00:04 crc kubenswrapper[4871]: I1126 07:00:04.482807 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j"]
Nov 26 07:00:04 crc kubenswrapper[4871]: I1126 07:00:04.492076 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402295-k8j7j"]
Nov 26 07:00:04 crc kubenswrapper[4871]: I1126 07:00:04.517650 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9d9076c-3859-4018-87b0-0ca60e08219c" path="/var/lib/kubelet/pods/c9d9076c-3859-4018-87b0-0ca60e08219c/volumes"
Nov 26 07:00:23 crc kubenswrapper[4871]: I1126 07:00:23.614629 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:00:23 crc kubenswrapper[4871]: I1126 07:00:23.615149 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:00:25 crc kubenswrapper[4871]: I1126 07:00:25.083173 4871 scope.go:117] "RemoveContainer" containerID="5a7b761141e485c178ee52de85870225a416d99b3b6a361fa0d7e05ab10c8334"
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.791390 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vsr79/must-gather-rtl88"]
Nov 26 07:00:33 crc kubenswrapper[4871]: E1126 07:00:33.792313 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3343b437-ae99-41ef-a0ae-c0b141dfb798" containerName="collect-profiles"
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.792327 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="3343b437-ae99-41ef-a0ae-c0b141dfb798" containerName="collect-profiles"
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.792583 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="3343b437-ae99-41ef-a0ae-c0b141dfb798" containerName="collect-profiles"
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.802038 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/must-gather-rtl88"
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.804468 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-vsr79"/"kube-root-ca.crt"
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.805566 4871 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-vsr79"/"openshift-service-ca.crt"
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.834343 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-vsr79/must-gather-rtl88"]
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.893707 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/95398267-c3f9-4b92-876c-9e5c594e63cb-must-gather-output\") pod \"must-gather-rtl88\" (UID: \"95398267-c3f9-4b92-876c-9e5c594e63cb\") " pod="openshift-must-gather-vsr79/must-gather-rtl88"
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.893750 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8nx6\" (UniqueName: \"kubernetes.io/projected/95398267-c3f9-4b92-876c-9e5c594e63cb-kube-api-access-j8nx6\") pod \"must-gather-rtl88\" (UID: \"95398267-c3f9-4b92-876c-9e5c594e63cb\") " pod="openshift-must-gather-vsr79/must-gather-rtl88"
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.995488 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/95398267-c3f9-4b92-876c-9e5c594e63cb-must-gather-output\") pod \"must-gather-rtl88\" (UID: \"95398267-c3f9-4b92-876c-9e5c594e63cb\") " pod="openshift-must-gather-vsr79/must-gather-rtl88"
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.995551 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8nx6\" (UniqueName: \"kubernetes.io/projected/95398267-c3f9-4b92-876c-9e5c594e63cb-kube-api-access-j8nx6\") pod \"must-gather-rtl88\" (UID: \"95398267-c3f9-4b92-876c-9e5c594e63cb\") " pod="openshift-must-gather-vsr79/must-gather-rtl88"
Nov 26 07:00:33 crc kubenswrapper[4871]: I1126 07:00:33.996085 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/95398267-c3f9-4b92-876c-9e5c594e63cb-must-gather-output\") pod \"must-gather-rtl88\" (UID: \"95398267-c3f9-4b92-876c-9e5c594e63cb\") " pod="openshift-must-gather-vsr79/must-gather-rtl88"
Nov 26 07:00:34 crc kubenswrapper[4871]: I1126 07:00:34.021278 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8nx6\" (UniqueName: \"kubernetes.io/projected/95398267-c3f9-4b92-876c-9e5c594e63cb-kube-api-access-j8nx6\") pod \"must-gather-rtl88\" (UID: \"95398267-c3f9-4b92-876c-9e5c594e63cb\") " pod="openshift-must-gather-vsr79/must-gather-rtl88"
Nov 26 07:00:34 crc kubenswrapper[4871]: I1126 07:00:34.122199 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/must-gather-rtl88"
Nov 26 07:00:34 crc kubenswrapper[4871]: I1126 07:00:34.662971 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-vsr79/must-gather-rtl88"]
Nov 26 07:00:34 crc kubenswrapper[4871]: W1126 07:00:34.666085 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95398267_c3f9_4b92_876c_9e5c594e63cb.slice/crio-746977fe71719e9721c0ada0c70e625cab439ef50eb2d2498a45dd602c20e629 WatchSource:0}: Error finding container 746977fe71719e9721c0ada0c70e625cab439ef50eb2d2498a45dd602c20e629: Status 404 returned error can't find the container with id 746977fe71719e9721c0ada0c70e625cab439ef50eb2d2498a45dd602c20e629
Nov 26 07:00:34 crc kubenswrapper[4871]: I1126 07:00:34.747074 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/must-gather-rtl88" event={"ID":"95398267-c3f9-4b92-876c-9e5c594e63cb","Type":"ContainerStarted","Data":"746977fe71719e9721c0ada0c70e625cab439ef50eb2d2498a45dd602c20e629"}
Nov 26 07:00:35 crc kubenswrapper[4871]: I1126 07:00:35.757031 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/must-gather-rtl88" event={"ID":"95398267-c3f9-4b92-876c-9e5c594e63cb","Type":"ContainerStarted","Data":"8d1c6fac7c9a6ca2bfb3bcf189ec7bcdb89901fde8d19ac7707314584b609ba7"}
Nov 26 07:00:35 crc kubenswrapper[4871]: I1126 07:00:35.757326 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/must-gather-rtl88" event={"ID":"95398267-c3f9-4b92-876c-9e5c594e63cb","Type":"ContainerStarted","Data":"26b782acd8d954890425c38deb57daa79f731e5d2dc03fdf80c5c5d3a87b91a9"}
Nov 26 07:00:38 crc kubenswrapper[4871]: I1126 07:00:38.852841 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-vsr79/must-gather-rtl88" podStartSLOduration=5.852816209 podStartE2EDuration="5.852816209s" podCreationTimestamp="2025-11-26 07:00:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:00:35.78213222 +0000 UTC m=+5693.965183806" watchObservedRunningTime="2025-11-26 07:00:38.852816209 +0000 UTC m=+5697.035867795"
Nov 26 07:00:38 crc kubenswrapper[4871]: I1126 07:00:38.855950 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vsr79/crc-debug-ht64f"]
Nov 26 07:00:38 crc kubenswrapper[4871]: I1126 07:00:38.857236 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-ht64f"
Nov 26 07:00:38 crc kubenswrapper[4871]: I1126 07:00:38.860372 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-vsr79"/"default-dockercfg-vd4t5"
Nov 26 07:00:38 crc kubenswrapper[4871]: I1126 07:00:38.901889 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2gnn\" (UniqueName: \"kubernetes.io/projected/23d13efa-98e4-4ad3-8379-7f38e633fc2b-kube-api-access-f2gnn\") pod \"crc-debug-ht64f\" (UID: \"23d13efa-98e4-4ad3-8379-7f38e633fc2b\") " pod="openshift-must-gather-vsr79/crc-debug-ht64f"
Nov 26 07:00:38 crc kubenswrapper[4871]: I1126 07:00:38.902138 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/23d13efa-98e4-4ad3-8379-7f38e633fc2b-host\") pod \"crc-debug-ht64f\" (UID: \"23d13efa-98e4-4ad3-8379-7f38e633fc2b\") " pod="openshift-must-gather-vsr79/crc-debug-ht64f"
Nov 26 07:00:39 crc kubenswrapper[4871]: I1126 07:00:39.003884 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/23d13efa-98e4-4ad3-8379-7f38e633fc2b-host\") pod \"crc-debug-ht64f\" (UID: \"23d13efa-98e4-4ad3-8379-7f38e633fc2b\") " pod="openshift-must-gather-vsr79/crc-debug-ht64f"
Nov 26 07:00:39 crc kubenswrapper[4871]: I1126 07:00:39.003952 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2gnn\" (UniqueName: \"kubernetes.io/projected/23d13efa-98e4-4ad3-8379-7f38e633fc2b-kube-api-access-f2gnn\") pod \"crc-debug-ht64f\" (UID: \"23d13efa-98e4-4ad3-8379-7f38e633fc2b\") " pod="openshift-must-gather-vsr79/crc-debug-ht64f"
Nov 26 07:00:39 crc kubenswrapper[4871]: I1126 07:00:39.004309 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/23d13efa-98e4-4ad3-8379-7f38e633fc2b-host\") pod \"crc-debug-ht64f\" (UID: \"23d13efa-98e4-4ad3-8379-7f38e633fc2b\") " pod="openshift-must-gather-vsr79/crc-debug-ht64f"
Nov 26 07:00:39 crc kubenswrapper[4871]: I1126 07:00:39.023362 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2gnn\" (UniqueName: \"kubernetes.io/projected/23d13efa-98e4-4ad3-8379-7f38e633fc2b-kube-api-access-f2gnn\") pod \"crc-debug-ht64f\" (UID: \"23d13efa-98e4-4ad3-8379-7f38e633fc2b\") " pod="openshift-must-gather-vsr79/crc-debug-ht64f"
Nov 26 07:00:39 crc kubenswrapper[4871]: I1126 07:00:39.177962 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-ht64f"
Nov 26 07:00:39 crc kubenswrapper[4871]: I1126 07:00:39.795408 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/crc-debug-ht64f" event={"ID":"23d13efa-98e4-4ad3-8379-7f38e633fc2b","Type":"ContainerStarted","Data":"d572a77bd1c4300875ab5bfa5ba42931119b91334206cbd2d4ab47949018e3b3"}
Nov 26 07:00:39 crc kubenswrapper[4871]: I1126 07:00:39.795981 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/crc-debug-ht64f" event={"ID":"23d13efa-98e4-4ad3-8379-7f38e633fc2b","Type":"ContainerStarted","Data":"d320b6595e01d70274a6333cc02de8c8dd6ae7fed2c7dc4858f32670a6ef8de5"}
Nov 26 07:00:39 crc kubenswrapper[4871]: I1126 07:00:39.812903 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-vsr79/crc-debug-ht64f" podStartSLOduration=1.8128816909999999 podStartE2EDuration="1.812881691s" podCreationTimestamp="2025-11-26 07:00:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:00:39.81161766 +0000 UTC m=+5697.994669246" watchObservedRunningTime="2025-11-26 07:00:39.812881691 +0000 UTC m=+5697.995933287"
Nov 26 07:00:53 crc kubenswrapper[4871]: I1126 07:00:53.615327 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 26 07:00:53 crc kubenswrapper[4871]: I1126 07:00:53.615999 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 26 07:00:53 crc kubenswrapper[4871]: I1126 07:00:53.616060 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2"
Nov 26 07:00:53 crc kubenswrapper[4871]: I1126 07:00:53.617009 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 26 07:00:53 crc kubenswrapper[4871]: I1126 07:00:53.617096 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" gracePeriod=600
Nov 26 07:00:53 crc kubenswrapper[4871]: E1126 07:00:53.741594 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 07:00:53 crc kubenswrapper[4871]: I1126 07:00:53.931790 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" exitCode=0
Nov 26 07:00:53 crc kubenswrapper[4871]: I1126 07:00:53.931834 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947"}
Nov 26 07:00:53 crc kubenswrapper[4871]: I1126 07:00:53.931867 4871 scope.go:117] "RemoveContainer" containerID="8c8e6c6e65605e9250462f55057cd081512df064b1cb341cc927f8d5cb10ef03"
Nov 26 07:00:53 crc kubenswrapper[4871]: I1126 07:00:53.932586 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947"
Nov 26 07:00:53 crc kubenswrapper[4871]: E1126 07:00:53.933001 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.162145 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29402341-l6znv"]
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.164138 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.177763 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402341-l6znv"]
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.341016 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-config-data\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.341465 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4ms5\" (UniqueName: \"kubernetes.io/projected/444a6e6f-d05b-4413-b021-fe7770e8e431-kube-api-access-t4ms5\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.341734 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-combined-ca-bundle\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.341811 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-fernet-keys\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.443953 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-config-data\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.444082 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4ms5\" (UniqueName: \"kubernetes.io/projected/444a6e6f-d05b-4413-b021-fe7770e8e431-kube-api-access-t4ms5\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.444148 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-combined-ca-bundle\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.444175 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-fernet-keys\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.453915 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-combined-ca-bundle\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.453961 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-fernet-keys\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.454033 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-config-data\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.462788 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4ms5\" (UniqueName: \"kubernetes.io/projected/444a6e6f-d05b-4413-b021-fe7770e8e431-kube-api-access-t4ms5\") pod \"keystone-cron-29402341-l6znv\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") " pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:00 crc kubenswrapper[4871]: I1126 07:01:00.492847 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:01 crc kubenswrapper[4871]: I1126 07:01:00.999979 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402341-l6znv"]
Nov 26 07:01:01 crc kubenswrapper[4871]: W1126 07:01:01.004502 4871 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod444a6e6f_d05b_4413_b021_fe7770e8e431.slice/crio-685e06ac2d537a7b84d6bad6124f3ec04949100483bc6eeea5501c969601d534 WatchSource:0}: Error finding container 685e06ac2d537a7b84d6bad6124f3ec04949100483bc6eeea5501c969601d534: Status 404 returned error can't find the container with id 685e06ac2d537a7b84d6bad6124f3ec04949100483bc6eeea5501c969601d534
Nov 26 07:01:02 crc kubenswrapper[4871]: I1126 07:01:02.014824 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402341-l6znv" event={"ID":"444a6e6f-d05b-4413-b021-fe7770e8e431","Type":"ContainerStarted","Data":"f8f0ef33a317009cfb531dbd1ffb00dfbb63b2bd3a7a7068076194645a20604c"}
Nov 26 07:01:02 crc kubenswrapper[4871]: I1126 07:01:02.015266 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402341-l6znv" event={"ID":"444a6e6f-d05b-4413-b021-fe7770e8e431","Type":"ContainerStarted","Data":"685e06ac2d537a7b84d6bad6124f3ec04949100483bc6eeea5501c969601d534"}
Nov 26 07:01:02 crc kubenswrapper[4871]: I1126 07:01:02.039996 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29402341-l6znv" podStartSLOduration=2.03997422 podStartE2EDuration="2.03997422s" podCreationTimestamp="2025-11-26 07:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:01:02.030822023 +0000 UTC m=+5720.213873609" watchObservedRunningTime="2025-11-26 07:01:02.03997422 +0000 UTC m=+5720.223025806"
Nov 26 07:01:05 crc kubenswrapper[4871]: I1126 07:01:05.507573 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947"
Nov 26 07:01:05 crc kubenswrapper[4871]: E1126 07:01:05.508426 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 07:01:06 crc kubenswrapper[4871]: I1126 07:01:06.070436 4871 generic.go:334] "Generic (PLEG): container finished" podID="444a6e6f-d05b-4413-b021-fe7770e8e431" containerID="f8f0ef33a317009cfb531dbd1ffb00dfbb63b2bd3a7a7068076194645a20604c" exitCode=0
Nov 26 07:01:06 crc kubenswrapper[4871]: I1126 07:01:06.070493 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402341-l6znv" event={"ID":"444a6e6f-d05b-4413-b021-fe7770e8e431","Type":"ContainerDied","Data":"f8f0ef33a317009cfb531dbd1ffb00dfbb63b2bd3a7a7068076194645a20604c"}
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.513318 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.604687 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-fernet-keys\") pod \"444a6e6f-d05b-4413-b021-fe7770e8e431\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") "
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.604842 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t4ms5\" (UniqueName: \"kubernetes.io/projected/444a6e6f-d05b-4413-b021-fe7770e8e431-kube-api-access-t4ms5\") pod \"444a6e6f-d05b-4413-b021-fe7770e8e431\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") "
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.604966 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-combined-ca-bundle\") pod \"444a6e6f-d05b-4413-b021-fe7770e8e431\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") "
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.605049 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-config-data\") pod \"444a6e6f-d05b-4413-b021-fe7770e8e431\" (UID: \"444a6e6f-d05b-4413-b021-fe7770e8e431\") "
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.613797 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/444a6e6f-d05b-4413-b021-fe7770e8e431-kube-api-access-t4ms5" (OuterVolumeSpecName: "kube-api-access-t4ms5") pod "444a6e6f-d05b-4413-b021-fe7770e8e431" (UID: "444a6e6f-d05b-4413-b021-fe7770e8e431"). InnerVolumeSpecName "kube-api-access-t4ms5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.621156 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "444a6e6f-d05b-4413-b021-fe7770e8e431" (UID: "444a6e6f-d05b-4413-b021-fe7770e8e431"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.674189 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "444a6e6f-d05b-4413-b021-fe7770e8e431" (UID: "444a6e6f-d05b-4413-b021-fe7770e8e431"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.704947 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-config-data" (OuterVolumeSpecName: "config-data") pod "444a6e6f-d05b-4413-b021-fe7770e8e431" (UID: "444a6e6f-d05b-4413-b021-fe7770e8e431"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.708252 4871 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.708279 4871 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-config-data\") on node \"crc\" DevicePath \"\""
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.708291 4871 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/444a6e6f-d05b-4413-b021-fe7770e8e431-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 26 07:01:07 crc kubenswrapper[4871]: I1126 07:01:07.708305 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t4ms5\" (UniqueName: \"kubernetes.io/projected/444a6e6f-d05b-4413-b021-fe7770e8e431-kube-api-access-t4ms5\") on node \"crc\" DevicePath \"\""
Nov 26 07:01:08 crc kubenswrapper[4871]: I1126 07:01:08.105752 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402341-l6znv" event={"ID":"444a6e6f-d05b-4413-b021-fe7770e8e431","Type":"ContainerDied","Data":"685e06ac2d537a7b84d6bad6124f3ec04949100483bc6eeea5501c969601d534"}
Nov 26 07:01:08 crc kubenswrapper[4871]: I1126 07:01:08.105795 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="685e06ac2d537a7b84d6bad6124f3ec04949100483bc6eeea5501c969601d534"
Nov 26 07:01:08 crc kubenswrapper[4871]: I1126 07:01:08.105854 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402341-l6znv"
Nov 26 07:01:19 crc kubenswrapper[4871]: I1126 07:01:19.506982 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947"
Nov 26 07:01:19 crc kubenswrapper[4871]: E1126 07:01:19.507608 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 07:01:23 crc kubenswrapper[4871]: I1126 07:01:23.253416 4871 generic.go:334] "Generic (PLEG): container finished" podID="23d13efa-98e4-4ad3-8379-7f38e633fc2b" containerID="d572a77bd1c4300875ab5bfa5ba42931119b91334206cbd2d4ab47949018e3b3" exitCode=0
Nov 26 07:01:23 crc kubenswrapper[4871]: I1126 07:01:23.253548 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/crc-debug-ht64f" event={"ID":"23d13efa-98e4-4ad3-8379-7f38e633fc2b","Type":"ContainerDied","Data":"d572a77bd1c4300875ab5bfa5ba42931119b91334206cbd2d4ab47949018e3b3"}
Nov 26 07:01:24 crc kubenswrapper[4871]: I1126 07:01:24.373605 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-ht64f"
Nov 26 07:01:24 crc kubenswrapper[4871]: I1126 07:01:24.413892 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vsr79/crc-debug-ht64f"]
Nov 26 07:01:24 crc kubenswrapper[4871]: I1126 07:01:24.422623 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vsr79/crc-debug-ht64f"]
Nov 26 07:01:24 crc kubenswrapper[4871]: I1126 07:01:24.448156 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2gnn\" (UniqueName: \"kubernetes.io/projected/23d13efa-98e4-4ad3-8379-7f38e633fc2b-kube-api-access-f2gnn\") pod \"23d13efa-98e4-4ad3-8379-7f38e633fc2b\" (UID: \"23d13efa-98e4-4ad3-8379-7f38e633fc2b\") "
Nov 26 07:01:24 crc kubenswrapper[4871]: I1126 07:01:24.448262 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/23d13efa-98e4-4ad3-8379-7f38e633fc2b-host\") pod \"23d13efa-98e4-4ad3-8379-7f38e633fc2b\" (UID: \"23d13efa-98e4-4ad3-8379-7f38e633fc2b\") "
Nov 26 07:01:24 crc kubenswrapper[4871]: I1126 07:01:24.448402 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/23d13efa-98e4-4ad3-8379-7f38e633fc2b-host" (OuterVolumeSpecName: "host") pod "23d13efa-98e4-4ad3-8379-7f38e633fc2b" (UID: "23d13efa-98e4-4ad3-8379-7f38e633fc2b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 07:01:24 crc kubenswrapper[4871]: I1126 07:01:24.449285 4871 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/23d13efa-98e4-4ad3-8379-7f38e633fc2b-host\") on node \"crc\" DevicePath \"\""
Nov 26 07:01:24 crc kubenswrapper[4871]: I1126 07:01:24.461053 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23d13efa-98e4-4ad3-8379-7f38e633fc2b-kube-api-access-f2gnn" (OuterVolumeSpecName: "kube-api-access-f2gnn") pod "23d13efa-98e4-4ad3-8379-7f38e633fc2b" (UID: "23d13efa-98e4-4ad3-8379-7f38e633fc2b"). InnerVolumeSpecName "kube-api-access-f2gnn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:01:24 crc kubenswrapper[4871]: I1126 07:01:24.523424 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23d13efa-98e4-4ad3-8379-7f38e633fc2b" path="/var/lib/kubelet/pods/23d13efa-98e4-4ad3-8379-7f38e633fc2b/volumes"
Nov 26 07:01:24 crc kubenswrapper[4871]: I1126 07:01:24.551271 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2gnn\" (UniqueName: \"kubernetes.io/projected/23d13efa-98e4-4ad3-8379-7f38e633fc2b-kube-api-access-f2gnn\") on node \"crc\" DevicePath \"\""
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.273807 4871 scope.go:117] "RemoveContainer" containerID="d572a77bd1c4300875ab5bfa5ba42931119b91334206cbd2d4ab47949018e3b3"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.273850 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-ht64f"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.583406 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vsr79/crc-debug-79glw"]
Nov 26 07:01:25 crc kubenswrapper[4871]: E1126 07:01:25.583838 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="444a6e6f-d05b-4413-b021-fe7770e8e431" containerName="keystone-cron"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.583851 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="444a6e6f-d05b-4413-b021-fe7770e8e431" containerName="keystone-cron"
Nov 26 07:01:25 crc kubenswrapper[4871]: E1126 07:01:25.583882 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23d13efa-98e4-4ad3-8379-7f38e633fc2b" containerName="container-00"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.583887 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="23d13efa-98e4-4ad3-8379-7f38e633fc2b" containerName="container-00"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.584083 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="23d13efa-98e4-4ad3-8379-7f38e633fc2b" containerName="container-00"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.584117 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="444a6e6f-d05b-4413-b021-fe7770e8e431" containerName="keystone-cron"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.584893 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-79glw"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.586920 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-vsr79"/"default-dockercfg-vd4t5"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.673354 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q28z6\" (UniqueName: \"kubernetes.io/projected/93dabe5e-0834-415a-9715-d0daf96c1df1-kube-api-access-q28z6\") pod \"crc-debug-79glw\" (UID: \"93dabe5e-0834-415a-9715-d0daf96c1df1\") " pod="openshift-must-gather-vsr79/crc-debug-79glw"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.673471 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/93dabe5e-0834-415a-9715-d0daf96c1df1-host\") pod \"crc-debug-79glw\" (UID: \"93dabe5e-0834-415a-9715-d0daf96c1df1\") " pod="openshift-must-gather-vsr79/crc-debug-79glw"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.775333 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q28z6\" (UniqueName: \"kubernetes.io/projected/93dabe5e-0834-415a-9715-d0daf96c1df1-kube-api-access-q28z6\") pod \"crc-debug-79glw\" (UID: \"93dabe5e-0834-415a-9715-d0daf96c1df1\") " pod="openshift-must-gather-vsr79/crc-debug-79glw"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.775447 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/93dabe5e-0834-415a-9715-d0daf96c1df1-host\") pod \"crc-debug-79glw\" (UID: \"93dabe5e-0834-415a-9715-d0daf96c1df1\") " pod="openshift-must-gather-vsr79/crc-debug-79glw"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.775607 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/93dabe5e-0834-415a-9715-d0daf96c1df1-host\") pod \"crc-debug-79glw\" (UID: \"93dabe5e-0834-415a-9715-d0daf96c1df1\") " pod="openshift-must-gather-vsr79/crc-debug-79glw"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.792957 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q28z6\" (UniqueName: \"kubernetes.io/projected/93dabe5e-0834-415a-9715-d0daf96c1df1-kube-api-access-q28z6\") pod \"crc-debug-79glw\" (UID: \"93dabe5e-0834-415a-9715-d0daf96c1df1\") " pod="openshift-must-gather-vsr79/crc-debug-79glw"
Nov 26 07:01:25 crc kubenswrapper[4871]: I1126 07:01:25.900034 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-79glw"
Nov 26 07:01:26 crc kubenswrapper[4871]: I1126 07:01:26.284242 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/crc-debug-79glw" event={"ID":"93dabe5e-0834-415a-9715-d0daf96c1df1","Type":"ContainerStarted","Data":"2fab019b1173d4acae2800f4dc5544604cbef0a034705320e202a8fa3a3fac4a"}
Nov 26 07:01:26 crc kubenswrapper[4871]: I1126 07:01:26.284803 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/crc-debug-79glw" event={"ID":"93dabe5e-0834-415a-9715-d0daf96c1df1","Type":"ContainerStarted","Data":"4e09389007f3616b5b982bb60f01e93f072adfe195d262ab689901630f43c984"}
Nov 26 07:01:26 crc kubenswrapper[4871]: I1126 07:01:26.300861 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-vsr79/crc-debug-79glw" podStartSLOduration=1.300843793 podStartE2EDuration="1.300843793s" podCreationTimestamp="2025-11-26 07:01:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 07:01:26.299943341 +0000 UTC m=+5744.482994927" watchObservedRunningTime="2025-11-26 07:01:26.300843793 +0000 UTC m=+5744.483895379"
Nov 26 07:01:27 crc kubenswrapper[4871]: I1126 07:01:27.295885 4871 generic.go:334] "Generic (PLEG): container finished" podID="93dabe5e-0834-415a-9715-d0daf96c1df1" containerID="2fab019b1173d4acae2800f4dc5544604cbef0a034705320e202a8fa3a3fac4a" exitCode=0
Nov 26 07:01:27 crc kubenswrapper[4871]: I1126 07:01:27.295945 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/crc-debug-79glw" event={"ID":"93dabe5e-0834-415a-9715-d0daf96c1df1","Type":"ContainerDied","Data":"2fab019b1173d4acae2800f4dc5544604cbef0a034705320e202a8fa3a3fac4a"}
Nov 26 07:01:28 crc kubenswrapper[4871]: I1126 07:01:28.432366 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-79glw"
Nov 26 07:01:28 crc kubenswrapper[4871]: I1126 07:01:28.535497 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/93dabe5e-0834-415a-9715-d0daf96c1df1-host\") pod \"93dabe5e-0834-415a-9715-d0daf96c1df1\" (UID: \"93dabe5e-0834-415a-9715-d0daf96c1df1\") "
Nov 26 07:01:28 crc kubenswrapper[4871]: I1126 07:01:28.535593 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q28z6\" (UniqueName: \"kubernetes.io/projected/93dabe5e-0834-415a-9715-d0daf96c1df1-kube-api-access-q28z6\") pod \"93dabe5e-0834-415a-9715-d0daf96c1df1\" (UID: \"93dabe5e-0834-415a-9715-d0daf96c1df1\") "
Nov 26 07:01:28 crc kubenswrapper[4871]: I1126 07:01:28.536328 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/93dabe5e-0834-415a-9715-d0daf96c1df1-host" (OuterVolumeSpecName: "host") pod "93dabe5e-0834-415a-9715-d0daf96c1df1" (UID: "93dabe5e-0834-415a-9715-d0daf96c1df1"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 26 07:01:28 crc kubenswrapper[4871]: I1126 07:01:28.586634 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93dabe5e-0834-415a-9715-d0daf96c1df1-kube-api-access-q28z6" (OuterVolumeSpecName: "kube-api-access-q28z6") pod "93dabe5e-0834-415a-9715-d0daf96c1df1" (UID: "93dabe5e-0834-415a-9715-d0daf96c1df1"). InnerVolumeSpecName "kube-api-access-q28z6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 26 07:01:28 crc kubenswrapper[4871]: I1126 07:01:28.637740 4871 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/93dabe5e-0834-415a-9715-d0daf96c1df1-host\") on node \"crc\" DevicePath \"\""
Nov 26 07:01:28 crc kubenswrapper[4871]: I1126 07:01:28.637778 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q28z6\" (UniqueName: \"kubernetes.io/projected/93dabe5e-0834-415a-9715-d0daf96c1df1-kube-api-access-q28z6\") on node \"crc\" DevicePath \"\""
Nov 26 07:01:28 crc kubenswrapper[4871]: I1126 07:01:28.890723 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vsr79/crc-debug-79glw"]
Nov 26 07:01:28 crc kubenswrapper[4871]: I1126 07:01:28.903549 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vsr79/crc-debug-79glw"]
Nov 26 07:01:29 crc kubenswrapper[4871]: I1126 07:01:29.321196 4871 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e09389007f3616b5b982bb60f01e93f072adfe195d262ab689901630f43c984"
Nov 26 07:01:29 crc kubenswrapper[4871]: I1126 07:01:29.321266 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-79glw"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.082016 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vsr79/crc-debug-7d9z4"]
Nov 26 07:01:30 crc kubenswrapper[4871]: E1126 07:01:30.082418 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93dabe5e-0834-415a-9715-d0daf96c1df1" containerName="container-00"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.082431 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="93dabe5e-0834-415a-9715-d0daf96c1df1" containerName="container-00"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.082676 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="93dabe5e-0834-415a-9715-d0daf96c1df1" containerName="container-00"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.083381 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-7d9z4"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.085846 4871 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-vsr79"/"default-dockercfg-vd4t5"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.166355 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8fvc\" (UniqueName: \"kubernetes.io/projected/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-kube-api-access-w8fvc\") pod \"crc-debug-7d9z4\" (UID: \"b9153a6a-99bb-462b-99cd-da4d0d1f00e9\") " pod="openshift-must-gather-vsr79/crc-debug-7d9z4"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.166426 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-host\") pod \"crc-debug-7d9z4\" (UID: \"b9153a6a-99bb-462b-99cd-da4d0d1f00e9\") " pod="openshift-must-gather-vsr79/crc-debug-7d9z4"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.268993 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8fvc\" (UniqueName: \"kubernetes.io/projected/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-kube-api-access-w8fvc\") pod \"crc-debug-7d9z4\" (UID: \"b9153a6a-99bb-462b-99cd-da4d0d1f00e9\") " pod="openshift-must-gather-vsr79/crc-debug-7d9z4"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.269131 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-host\") pod \"crc-debug-7d9z4\" (UID: \"b9153a6a-99bb-462b-99cd-da4d0d1f00e9\") " pod="openshift-must-gather-vsr79/crc-debug-7d9z4"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.269239 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-host\") pod \"crc-debug-7d9z4\" (UID: \"b9153a6a-99bb-462b-99cd-da4d0d1f00e9\") " pod="openshift-must-gather-vsr79/crc-debug-7d9z4"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.299380 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8fvc\" (UniqueName: \"kubernetes.io/projected/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-kube-api-access-w8fvc\") pod \"crc-debug-7d9z4\" (UID: \"b9153a6a-99bb-462b-99cd-da4d0d1f00e9\") " pod="openshift-must-gather-vsr79/crc-debug-7d9z4"
Nov 26 07:01:30 crc kubenswrapper[4871]: I1126
07:01:30.403665 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-7d9z4" Nov 26 07:01:30 crc kubenswrapper[4871]: I1126 07:01:30.521503 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93dabe5e-0834-415a-9715-d0daf96c1df1" path="/var/lib/kubelet/pods/93dabe5e-0834-415a-9715-d0daf96c1df1/volumes" Nov 26 07:01:31 crc kubenswrapper[4871]: I1126 07:01:31.352229 4871 generic.go:334] "Generic (PLEG): container finished" podID="b9153a6a-99bb-462b-99cd-da4d0d1f00e9" containerID="a7b2ac9907fee117c24219137621654f10dac767fabf2f3d461388e511bf21b3" exitCode=0 Nov 26 07:01:31 crc kubenswrapper[4871]: I1126 07:01:31.352364 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/crc-debug-7d9z4" event={"ID":"b9153a6a-99bb-462b-99cd-da4d0d1f00e9","Type":"ContainerDied","Data":"a7b2ac9907fee117c24219137621654f10dac767fabf2f3d461388e511bf21b3"} Nov 26 07:01:31 crc kubenswrapper[4871]: I1126 07:01:31.352734 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/crc-debug-7d9z4" event={"ID":"b9153a6a-99bb-462b-99cd-da4d0d1f00e9","Type":"ContainerStarted","Data":"f6cdf173fda31b56e0e826ff406a541d069095ff5c5a2cc81914c317c4b13538"} Nov 26 07:01:31 crc kubenswrapper[4871]: I1126 07:01:31.414203 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vsr79/crc-debug-7d9z4"] Nov 26 07:01:31 crc kubenswrapper[4871]: I1126 07:01:31.427557 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vsr79/crc-debug-7d9z4"] Nov 26 07:01:32 crc kubenswrapper[4871]: I1126 07:01:32.469692 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-7d9z4" Nov 26 07:01:32 crc kubenswrapper[4871]: I1126 07:01:32.515705 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:01:32 crc kubenswrapper[4871]: E1126 07:01:32.516113 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:01:32 crc kubenswrapper[4871]: I1126 07:01:32.516542 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-host\") pod \"b9153a6a-99bb-462b-99cd-da4d0d1f00e9\" (UID: \"b9153a6a-99bb-462b-99cd-da4d0d1f00e9\") " Nov 26 07:01:32 crc kubenswrapper[4871]: I1126 07:01:32.516673 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-host" (OuterVolumeSpecName: "host") pod "b9153a6a-99bb-462b-99cd-da4d0d1f00e9" (UID: "b9153a6a-99bb-462b-99cd-da4d0d1f00e9"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 07:01:32 crc kubenswrapper[4871]: I1126 07:01:32.516740 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8fvc\" (UniqueName: \"kubernetes.io/projected/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-kube-api-access-w8fvc\") pod \"b9153a6a-99bb-462b-99cd-da4d0d1f00e9\" (UID: \"b9153a6a-99bb-462b-99cd-da4d0d1f00e9\") " Nov 26 07:01:32 crc kubenswrapper[4871]: I1126 07:01:32.517279 4871 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-host\") on node \"crc\" DevicePath \"\"" Nov 26 07:01:32 crc kubenswrapper[4871]: I1126 07:01:32.541577 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-kube-api-access-w8fvc" (OuterVolumeSpecName: "kube-api-access-w8fvc") pod "b9153a6a-99bb-462b-99cd-da4d0d1f00e9" (UID: "b9153a6a-99bb-462b-99cd-da4d0d1f00e9"). InnerVolumeSpecName "kube-api-access-w8fvc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:01:32 crc kubenswrapper[4871]: I1126 07:01:32.618954 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8fvc\" (UniqueName: \"kubernetes.io/projected/b9153a6a-99bb-462b-99cd-da4d0d1f00e9-kube-api-access-w8fvc\") on node \"crc\" DevicePath \"\"" Nov 26 07:01:33 crc kubenswrapper[4871]: I1126 07:01:33.374674 4871 scope.go:117] "RemoveContainer" containerID="a7b2ac9907fee117c24219137621654f10dac767fabf2f3d461388e511bf21b3" Nov 26 07:01:33 crc kubenswrapper[4871]: I1126 07:01:33.374781 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vsr79/crc-debug-7d9z4" Nov 26 07:01:34 crc kubenswrapper[4871]: I1126 07:01:34.517638 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9153a6a-99bb-462b-99cd-da4d0d1f00e9" path="/var/lib/kubelet/pods/b9153a6a-99bb-462b-99cd-da4d0d1f00e9/volumes" Nov 26 07:01:46 crc kubenswrapper[4871]: I1126 07:01:46.508255 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:01:46 crc kubenswrapper[4871]: E1126 07:01:46.509125 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:01:57 crc kubenswrapper[4871]: I1126 07:01:57.507335 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:01:57 crc kubenswrapper[4871]: E1126 07:01:57.508210 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:02:06 crc kubenswrapper[4871]: I1126 07:02:06.244912 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-api-6b4f49568b-znxq7_421fd2e9-5378-4cd9-89c0-523f89b8fea6/barbican-api-log/0.log" Nov 26 07:02:06 crc kubenswrapper[4871]: I1126 07:02:06.249047 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6b4f49568b-znxq7_421fd2e9-5378-4cd9-89c0-523f89b8fea6/barbican-api/0.log" Nov 26 07:02:06 crc kubenswrapper[4871]: I1126 07:02:06.467737 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-ff77984c8-tthxz_2d0d2e04-05e3-4ace-8b11-0d6317e7ed80/barbican-keystone-listener/0.log" Nov 26 07:02:06 crc kubenswrapper[4871]: I1126 07:02:06.519824 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-ff77984c8-tthxz_2d0d2e04-05e3-4ace-8b11-0d6317e7ed80/barbican-keystone-listener-log/0.log" Nov 26 07:02:06 crc kubenswrapper[4871]: I1126 07:02:06.611968 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-574cf75679-xcbqs_19019851-fc4d-41ff-ba88-f347dc3305a2/barbican-worker/0.log" Nov 26 07:02:06 crc kubenswrapper[4871]: I1126 07:02:06.818801 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-574cf75679-xcbqs_19019851-fc4d-41ff-ba88-f347dc3305a2/barbican-worker-log/0.log" Nov 26 07:02:06 crc kubenswrapper[4871]: I1126 07:02:06.967154 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-qq5rq_a811292e-f231-48cd-98b5-4acd21f945ed/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:07 crc kubenswrapper[4871]: I1126 07:02:07.051970 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ffa88fa-bd91-473e-8d4e-44fc61235b3d/ceilometer-central-agent/0.log" Nov 26 07:02:07 crc kubenswrapper[4871]: I1126 07:02:07.273123 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ffa88fa-bd91-473e-8d4e-44fc61235b3d/ceilometer-notification-agent/0.log" Nov 26 07:02:07 crc kubenswrapper[4871]: I1126 07:02:07.284612 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ffa88fa-bd91-473e-8d4e-44fc61235b3d/proxy-httpd/0.log" Nov 26 07:02:07 crc kubenswrapper[4871]: I1126 07:02:07.323777 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_2ffa88fa-bd91-473e-8d4e-44fc61235b3d/sg-core/0.log" Nov 26 07:02:07 crc kubenswrapper[4871]: I1126 07:02:07.570246 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bb2121e7-904c-4de4-a336-0ed681cd9be9/cinder-api-log/0.log" Nov 26 07:02:07 crc kubenswrapper[4871]: I1126 07:02:07.802365 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_adddc22d-b976-4931-8dde-359f0952b438/cinder-scheduler/0.log" Nov 26 07:02:07 crc kubenswrapper[4871]: I1126 07:02:07.852863 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bb2121e7-904c-4de4-a336-0ed681cd9be9/cinder-api/0.log" Nov 26 07:02:07 crc kubenswrapper[4871]: I1126 07:02:07.872777 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_adddc22d-b976-4931-8dde-359f0952b438/probe/0.log" Nov 26 07:02:08 crc kubenswrapper[4871]: I1126 07:02:08.095534 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-cs7rl_ff26f53b-8fe4-4dde-b475-348beb78046d/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:08 crc kubenswrapper[4871]: I1126 07:02:08.124831 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-tmvjt_46746b5b-e35a-452a-bdad-12b497a8c3b0/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:08 crc kubenswrapper[4871]: I1126 07:02:08.254975 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-77b58f4b85-prlhs_7ea434ed-7152-4539-9589-d743e9d5b6c5/init/0.log" Nov 26 07:02:08 crc kubenswrapper[4871]: I1126 07:02:08.459905 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-77b58f4b85-prlhs_7ea434ed-7152-4539-9589-d743e9d5b6c5/init/0.log" Nov 26 07:02:08 crc kubenswrapper[4871]: I1126 07:02:08.506867 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:02:08 crc kubenswrapper[4871]: E1126 07:02:08.507135 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:02:08 crc kubenswrapper[4871]: I1126 07:02:08.526277 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-hzhkt_e0d17dc4-5d95-48fe-bf52-6241f6bfd6fa/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:08 crc kubenswrapper[4871]: I1126 07:02:08.656070 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-77b58f4b85-prlhs_7ea434ed-7152-4539-9589-d743e9d5b6c5/dnsmasq-dns/0.log" Nov 26 07:02:08 crc kubenswrapper[4871]: I1126 07:02:08.728111 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_206a6ff7-c300-42b8-9816-a272aacc0d94/glance-httpd/0.log" Nov 26 07:02:08 crc kubenswrapper[4871]: I1126 07:02:08.777556 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_206a6ff7-c300-42b8-9816-a272aacc0d94/glance-log/0.log" Nov 26 07:02:08 crc kubenswrapper[4871]: I1126 07:02:08.950283 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_a199844c-c13e-47ce-8980-b3292e3435b3/glance-httpd/0.log" Nov 26 07:02:09 crc kubenswrapper[4871]: I1126 07:02:09.032839 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_a199844c-c13e-47ce-8980-b3292e3435b3/glance-log/0.log" Nov 26 07:02:09 crc kubenswrapper[4871]: I1126 07:02:09.242725 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7cbf6bc784-rm6hn_4a2ec979-4e84-42ce-9299-8b9f5d88f001/horizon/0.log" Nov 26 07:02:09 crc kubenswrapper[4871]: I1126 07:02:09.336207 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-gf8rj_48acdf72-822b-456b-b545-bd1499db855d/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:09 crc kubenswrapper[4871]: I1126 07:02:09.773803 4871 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7cbf6bc784-rm6hn_4a2ec979-4e84-42ce-9299-8b9f5d88f001/horizon-log/0.log" Nov 26 07:02:09 crc kubenswrapper[4871]: I1126 07:02:09.786299 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-99lcn_8fa0c629-09c7-43d9-964c-37320a475595/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:09 crc kubenswrapper[4871]: I1126 07:02:09.985929 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29402281-4rm7g_3af4cf7b-408a-44b2-a5b3-2919f8f8ee68/keystone-cron/0.log" Nov 26 07:02:10 crc kubenswrapper[4871]: I1126 07:02:10.229956 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29402341-l6znv_444a6e6f-d05b-4413-b021-fe7770e8e431/keystone-cron/0.log" Nov 26 07:02:10 crc kubenswrapper[4871]: I1126 07:02:10.355133 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_e20fd17b-5b64-4272-9876-347ea057aa04/kube-state-metrics/3.log" Nov 26 07:02:10 crc kubenswrapper[4871]: I1126 07:02:10.370936 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-847fdf8fc-mswx4_609a98bb-6812-4d0f-b408-023056fc5bca/keystone-api/0.log" Nov 26 07:02:10 crc kubenswrapper[4871]: I1126 07:02:10.403631 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_e20fd17b-5b64-4272-9876-347ea057aa04/kube-state-metrics/2.log" Nov 26 07:02:10 crc kubenswrapper[4871]: I1126 07:02:10.573761 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-6tkgn_95ebef76-794b-40b5-bf99-3604b66446f2/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:10 crc kubenswrapper[4871]: I1126 07:02:10.983319 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-9mvhf_b6bbc102-0536-4833-8d96-a94360126601/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:11 crc kubenswrapper[4871]: I1126 07:02:11.191033 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5566bf8457-7qhhj_7dc2c737-ebec-4a5a-b06b-ffc355fb0a77/neutron-httpd/0.log" Nov 26 07:02:11 crc kubenswrapper[4871]: I1126 07:02:11.226908 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5566bf8457-7qhhj_7dc2c737-ebec-4a5a-b06b-ffc355fb0a77/neutron-api/0.log" Nov 26 07:02:11 crc kubenswrapper[4871]: I1126 07:02:11.947563 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_20a39a9e-9f10-45c6-be1c-9834e366658f/nova-cell0-conductor-conductor/0.log" Nov 26 07:02:12 crc kubenswrapper[4871]: I1126 07:02:12.176201 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_a703f1e3-b021-4fe0-9c3f-a5a90b96678e/nova-cell1-conductor-conductor/0.log" Nov 26 07:02:12 crc kubenswrapper[4871]: I1126 07:02:12.835168 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_94714c91-ac3e-4195-9c74-84e090b73a6e/nova-cell1-novncproxy-novncproxy/0.log" Nov 26 07:02:12 crc kubenswrapper[4871]: I1126 07:02:12.918548 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-wnbt7_4872fb15-1719-4e77-b0c1-7a2754ff7991/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:13 crc 
kubenswrapper[4871]: I1126 07:02:13.034931 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_b0049ce2-17f9-4372-a66e-7c03a3763460/nova-api-log/0.log" Nov 26 07:02:13 crc kubenswrapper[4871]: I1126 07:02:13.198245 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d8636384-aac2-4fd2-8f51-5cd6ca47c362/nova-metadata-log/0.log" Nov 26 07:02:13 crc kubenswrapper[4871]: I1126 07:02:13.358944 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_b0049ce2-17f9-4372-a66e-7c03a3763460/nova-api-api/0.log" Nov 26 07:02:13 crc kubenswrapper[4871]: I1126 07:02:13.566672 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1a6ce456-795f-4bf1-bab9-f5de7cfd7abe/mysql-bootstrap/0.log" Nov 26 07:02:13 crc kubenswrapper[4871]: I1126 07:02:13.791299 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_27f01c4d-ea3c-4e99-ba8a-e31d9628307b/nova-scheduler-scheduler/0.log" Nov 26 07:02:13 crc kubenswrapper[4871]: I1126 07:02:13.813770 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1a6ce456-795f-4bf1-bab9-f5de7cfd7abe/galera/0.log" Nov 26 07:02:13 crc kubenswrapper[4871]: I1126 07:02:13.818025 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_1a6ce456-795f-4bf1-bab9-f5de7cfd7abe/mysql-bootstrap/0.log" Nov 26 07:02:14 crc kubenswrapper[4871]: I1126 07:02:14.055814 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_fef4681d-3f18-4ed5-b251-92f53274dacd/mysql-bootstrap/0.log" Nov 26 07:02:14 crc kubenswrapper[4871]: I1126 07:02:14.223339 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_fef4681d-3f18-4ed5-b251-92f53274dacd/mysql-bootstrap/0.log" Nov 26 07:02:14 crc kubenswrapper[4871]: I1126 07:02:14.331141 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_fef4681d-3f18-4ed5-b251-92f53274dacd/galera/0.log" Nov 26 07:02:14 crc kubenswrapper[4871]: I1126 07:02:14.446431 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_7218b9c9-2508-46eb-8942-4c22b0c706cf/openstackclient/0.log" Nov 26 07:02:14 crc kubenswrapper[4871]: I1126 07:02:14.675073 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-m255d_de8a947b-6c51-4c33-b221-ea16d851bafb/ovn-controller/0.log" Nov 26 07:02:14 crc kubenswrapper[4871]: I1126 07:02:14.806660 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wmd2n_d0e1b432-5c82-4c9d-8b98-fe3dd7e24fdd/openstack-network-exporter/0.log" Nov 26 07:02:14 crc kubenswrapper[4871]: I1126 07:02:14.951488 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t9t82_9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e/ovsdb-server-init/0.log" Nov 26 07:02:15 crc kubenswrapper[4871]: I1126 07:02:15.156322 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t9t82_9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e/ovsdb-server/0.log" Nov 26 07:02:15 crc kubenswrapper[4871]: I1126 07:02:15.184128 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t9t82_9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e/ovsdb-server-init/0.log" Nov 26 07:02:15 crc kubenswrapper[4871]: I1126 07:02:15.502073 4871 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-896gl_8d747185-1d52-4102-be05-7f18ff179f3a/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:15 crc kubenswrapper[4871]: I1126 07:02:15.632332 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-t9t82_9a7a8e3f-4fa4-484d-8f81-63a8e9dab10e/ovs-vswitchd/0.log" Nov 26 07:02:15 crc kubenswrapper[4871]: I1126 07:02:15.742983 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_251bc2ce-32a0-4d94-843b-f7ac83e601f4/openstack-network-exporter/0.log" Nov 26 07:02:15 crc kubenswrapper[4871]: I1126 07:02:15.756137 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d8636384-aac2-4fd2-8f51-5cd6ca47c362/nova-metadata-metadata/0.log" Nov 26 07:02:15 crc kubenswrapper[4871]: I1126 07:02:15.819276 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_251bc2ce-32a0-4d94-843b-f7ac83e601f4/ovn-northd/0.log" Nov 26 07:02:16 crc kubenswrapper[4871]: I1126 07:02:16.033165 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_df0ee863-8fbb-4a6e-86e3-8d56cf38da47/ovsdbserver-nb/0.log" Nov 26 07:02:16 crc kubenswrapper[4871]: I1126 07:02:16.091750 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_df0ee863-8fbb-4a6e-86e3-8d56cf38da47/openstack-network-exporter/0.log" Nov 26 07:02:16 crc kubenswrapper[4871]: I1126 07:02:16.155458 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_bd85545a-d991-4635-8d4b-2b81937e389f/openstack-network-exporter/0.log" Nov 26 07:02:16 crc kubenswrapper[4871]: I1126 07:02:16.273137 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_bd85545a-d991-4635-8d4b-2b81937e389f/ovsdbserver-sb/0.log" Nov 26 07:02:16 crc kubenswrapper[4871]: I1126 07:02:16.582936 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-665fcf99fb-m82r7_ae63dcae-cddc-4f63-acc0-4ec3254a6116/placement-api/0.log" Nov 26 07:02:16 crc kubenswrapper[4871]: I1126 07:02:16.609105 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ea6e2b4-f88f-48c1-9044-5697a38a7abb/init-config-reloader/0.log" Nov 26 07:02:16 crc kubenswrapper[4871]: I1126 07:02:16.654096 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-665fcf99fb-m82r7_ae63dcae-cddc-4f63-acc0-4ec3254a6116/placement-log/0.log" Nov 26 07:02:16 crc kubenswrapper[4871]: I1126 07:02:16.802689 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ea6e2b4-f88f-48c1-9044-5697a38a7abb/init-config-reloader/0.log" Nov 26 07:02:16 crc kubenswrapper[4871]: I1126 07:02:16.866828 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ea6e2b4-f88f-48c1-9044-5697a38a7abb/config-reloader/0.log" Nov 26 07:02:16 crc kubenswrapper[4871]: I1126 07:02:16.904355 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ea6e2b4-f88f-48c1-9044-5697a38a7abb/prometheus/0.log" Nov 26 07:02:16 crc kubenswrapper[4871]: I1126 07:02:16.928102 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ea6e2b4-f88f-48c1-9044-5697a38a7abb/thanos-sidecar/0.log" Nov 26 07:02:17 crc kubenswrapper[4871]: I1126 07:02:17.084504 4871 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c02a9e9c-8083-4903-a64d-a140b1c9c143/setup-container/0.log" Nov 26 07:02:17 crc kubenswrapper[4871]: I1126 07:02:17.297766 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c02a9e9c-8083-4903-a64d-a140b1c9c143/rabbitmq/0.log" Nov 26 07:02:17 crc kubenswrapper[4871]: I1126 07:02:17.363510 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_7df95f1b-7a5b-445e-bb56-b17695a0bde9/setup-container/0.log" Nov 26 07:02:17 crc kubenswrapper[4871]: I1126 07:02:17.404319 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_c02a9e9c-8083-4903-a64d-a140b1c9c143/setup-container/0.log" Nov 26 07:02:17 crc kubenswrapper[4871]: I1126 07:02:17.523848 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_7df95f1b-7a5b-445e-bb56-b17695a0bde9/setup-container/0.log" Nov 26 07:02:17 crc kubenswrapper[4871]: I1126 07:02:17.697644 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-notifications-server-0_7df95f1b-7a5b-445e-bb56-b17695a0bde9/rabbitmq/0.log" Nov 26 07:02:17 crc kubenswrapper[4871]: I1126 07:02:17.794123 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_f823aa11-fe59-4296-9a43-81bfc1275737/setup-container/0.log" Nov 26 07:02:17 crc kubenswrapper[4871]: I1126 07:02:17.925358 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_f823aa11-fe59-4296-9a43-81bfc1275737/setup-container/0.log" Nov 26 07:02:17 crc kubenswrapper[4871]: I1126 07:02:17.987823 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_f823aa11-fe59-4296-9a43-81bfc1275737/rabbitmq/0.log" Nov 26 07:02:18 crc kubenswrapper[4871]: I1126 07:02:18.250725 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-x6s6m_d39ab741-a044-4ac6-9f2a-0949948cafdb/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:18 crc kubenswrapper[4871]: I1126 07:02:18.414717 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-vgjkj_4027b3b8-7a16-419f-8b16-52ff000c7268/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:18 crc kubenswrapper[4871]: I1126 07:02:18.474109 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-d5jw7_2024b11a-b0d5-4988-ba36-cdcb7eb4d5c2/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:18 crc kubenswrapper[4871]: I1126 07:02:18.638630 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-cglzv_fca1e368-592f-4da5-b8f8-12bb29eca743/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:18 crc kubenswrapper[4871]: I1126 07:02:18.751804 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-d5dz5_0a3a838b-5101-4706-a5d9-50fc5797ba72/ssh-known-hosts-edpm-deployment/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.007462 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-78dd8485c9-fx6sv_fcca2594-c385-49cd-8354-7e4fcfab96c8/proxy-server/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.177140 4871 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-78dd8485c9-fx6sv_fcca2594-c385-49cd-8354-7e4fcfab96c8/proxy-httpd/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.225919 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/account-auditor/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.251972 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-qtnpf_bd92bcfc-31b0-4ec0-853f-0e2bbfc2c53d/swift-ring-rebalance/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.400450 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/account-reaper/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.464067 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/account-server/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.550374 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/account-replicator/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.640130 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/container-replicator/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.652088 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/container-auditor/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.763263 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/container-server/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.789312 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/container-updater/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.879566 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/object-expirer/0.log" Nov 26 07:02:19 crc kubenswrapper[4871]: I1126 07:02:19.919379 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/object-auditor/0.log" Nov 26 07:02:20 crc kubenswrapper[4871]: I1126 07:02:20.036477 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/object-replicator/0.log" Nov 26 07:02:20 crc kubenswrapper[4871]: I1126 07:02:20.043991 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/object-server/0.log" Nov 26 07:02:20 crc kubenswrapper[4871]: I1126 07:02:20.147972 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/rsync/0.log" Nov 26 07:02:20 crc kubenswrapper[4871]: I1126 07:02:20.151007 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/object-updater/0.log" Nov 26 07:02:20 crc kubenswrapper[4871]: I1126 07:02:20.297425 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_c927c3b8-9d32-4cbb-97cc-d834a6e225c1/swift-recon-cron/0.log" Nov 26 07:02:20 crc kubenswrapper[4871]: I1126 07:02:20.507169 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:02:20 crc kubenswrapper[4871]: E1126 07:02:20.507430 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:02:20 crc kubenswrapper[4871]: I1126 07:02:20.788756 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-8mffj_bfc1b363-fb5b-4872-bf7f-215dc9c617b5/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:20 crc kubenswrapper[4871]: I1126 07:02:20.888401 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_d4d1d560-ed1f-4b35-bde2-53c83e6ddabc/tempest-tests-tempest-tests-runner/0.log" Nov 26 07:02:20 crc kubenswrapper[4871]: I1126 07:02:20.996633 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_69802496-881e-4259-a45b-a75b1434b79d/test-operator-logs-container/0.log" Nov 26 07:02:21 crc kubenswrapper[4871]: I1126 07:02:21.116827 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-g6lfk_a8ef4ffd-bd80-4733-a2dd-9dfd1ac695f4/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 26 07:02:22 crc kubenswrapper[4871]: I1126 07:02:22.062279 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_e939bb2f-dadb-4353-8845-f31c42b87a75/watcher-applier/0.log" Nov 26 07:02:22 crc kubenswrapper[4871]: I1126 07:02:22.510942 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_154bc562-d8d8-4608-8973-66b427a4f98f/watcher-api-log/0.log" Nov 26 07:02:25 crc kubenswrapper[4871]: I1126 07:02:25.161747 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_de5b1e93-a28e-405b-8ab4-a1bc50922b2e/watcher-decision-engine/0.log" Nov 26 07:02:25 crc kubenswrapper[4871]: I1126 07:02:25.588853 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_2757b1a6-7b8f-4008-8a08-96985496ec1a/memcached/0.log" Nov 26 07:02:26 crc kubenswrapper[4871]: I1126 07:02:26.219381 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_154bc562-d8d8-4608-8973-66b427a4f98f/watcher-api/0.log" Nov 26 07:02:32 crc kubenswrapper[4871]: I1126 07:02:32.514338 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:02:32 crc kubenswrapper[4871]: E1126 07:02:32.515554 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" 
podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:02:46 crc kubenswrapper[4871]: I1126 07:02:46.507475 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:02:46 crc kubenswrapper[4871]: E1126 07:02:46.509754 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:02:47 crc kubenswrapper[4871]: I1126 07:02:47.322873 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/util/0.log" Nov 26 07:02:47 crc kubenswrapper[4871]: I1126 07:02:47.531584 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/util/0.log" Nov 26 07:02:47 crc kubenswrapper[4871]: I1126 07:02:47.538009 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/pull/0.log" Nov 26 07:02:47 crc kubenswrapper[4871]: I1126 07:02:47.545183 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/pull/0.log" Nov 26 07:02:47 crc kubenswrapper[4871]: I1126 07:02:47.761277 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/pull/0.log" Nov 26 07:02:47 crc kubenswrapper[4871]: I1126 07:02:47.779164 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/extract/0.log" Nov 26 07:02:47 crc kubenswrapper[4871]: I1126 07:02:47.781827 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a851be339ab153e8317811375455cffeec26c0cd29d35bc2f201b76006d269b_2a991e15-5da4-457c-95b8-64e0ba0b7f0c/util/0.log" Nov 26 07:02:47 crc kubenswrapper[4871]: I1126 07:02:47.917460 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-bdpn4_8c65e9f4-e3de-4bce-851a-f85c1036daa7/kube-rbac-proxy/0.log" Nov 26 07:02:47 crc kubenswrapper[4871]: I1126 07:02:47.955153 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-bdpn4_8c65e9f4-e3de-4bce-851a-f85c1036daa7/manager/2.log" Nov 26 07:02:47 crc kubenswrapper[4871]: I1126 07:02:47.972977 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-bdpn4_8c65e9f4-e3de-4bce-851a-f85c1036daa7/manager/1.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.074519 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-wmwwk_ea13fc75-b3f0-48d3-9d86-5262df2957eb/kube-rbac-proxy/0.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.127243 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-wmwwk_ea13fc75-b3f0-48d3-9d86-5262df2957eb/manager/3.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.143864 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-wmwwk_ea13fc75-b3f0-48d3-9d86-5262df2957eb/manager/2.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.252942 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-tsz49_70168336-54b1-481f-b6a0-d565be07d353/kube-rbac-proxy/0.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.287331 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-tsz49_70168336-54b1-481f-b6a0-d565be07d353/manager/3.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.324358 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-tsz49_70168336-54b1-481f-b6a0-d565be07d353/manager/2.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.459478 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-czv5j_94ce6277-5176-415b-9f4d-847a73c93723/kube-rbac-proxy/0.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.468591 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-czv5j_94ce6277-5176-415b-9f4d-847a73c93723/manager/3.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.511893 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-czv5j_94ce6277-5176-415b-9f4d-847a73c93723/manager/2.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.617135 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-5kslm_9253bdc4-d16f-42eb-8704-0965e99dfe47/kube-rbac-proxy/0.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.657301 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-5kslm_9253bdc4-d16f-42eb-8704-0965e99dfe47/manager/3.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.688087 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-5kslm_9253bdc4-d16f-42eb-8704-0965e99dfe47/manager/2.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.780627 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-clm5v_4659b831-32eb-4da2-97f3-f654a299605e/kube-rbac-proxy/0.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.823545 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-clm5v_4659b831-32eb-4da2-97f3-f654a299605e/manager/1.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.854948 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-clm5v_4659b831-32eb-4da2-97f3-f654a299605e/manager/2.log" Nov 26 07:02:48 crc kubenswrapper[4871]: I1126 07:02:48.991667 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x5hqw_06b4e3ae-765b-41c4-9334-4e33c2dc305f/manager/3.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.004710 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x5hqw_06b4e3ae-765b-41c4-9334-4e33c2dc305f/kube-rbac-proxy/0.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.047334 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x5hqw_06b4e3ae-765b-41c4-9334-4e33c2dc305f/manager/2.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.170772 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-lzsqj_51410db5-d309-4625-8f36-02cf8f0ba419/kube-rbac-proxy/0.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.174368 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-lzsqj_51410db5-d309-4625-8f36-02cf8f0ba419/manager/3.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.230180 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-lzsqj_51410db5-d309-4625-8f36-02cf8f0ba419/manager/2.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.357560 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-4gvxx_32cd59dd-1a82-4fce-81b1-ebc8f75f1e93/kube-rbac-proxy/0.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.424872 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-4gvxx_32cd59dd-1a82-4fce-81b1-ebc8f75f1e93/manager/2.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.444479 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-4gvxx_32cd59dd-1a82-4fce-81b1-ebc8f75f1e93/manager/3.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.581145 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-jvztg_6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c/kube-rbac-proxy/0.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.622781 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-jvztg_6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c/manager/3.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.673633 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-jvztg_6dc2f30e-6f6a-4be9-b3b4-f2c7c636ca2c/manager/2.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.784021 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-9xghq_2c7b5f25-e4ef-4abd-ba84-61b98f194ddd/kube-rbac-proxy/0.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.800080 4871 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-9xghq_2c7b5f25-e4ef-4abd-ba84-61b98f194ddd/manager/3.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.875911 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-9xghq_2c7b5f25-e4ef-4abd-ba84-61b98f194ddd/manager/2.log" Nov 26 07:02:49 crc kubenswrapper[4871]: I1126 07:02:49.966330 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-6lpnj_19a75285-dcb7-4f34-b79c-613c96d555de/kube-rbac-proxy/0.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.019453 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-6lpnj_19a75285-dcb7-4f34-b79c-613c96d555de/manager/3.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.054985 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-6lpnj_19a75285-dcb7-4f34-b79c-613c96d555de/manager/2.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.153422 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rlr55_f68377a4-dee0-404b-988a-4f0673466e62/kube-rbac-proxy/0.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.178858 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rlr55_f68377a4-dee0-404b-988a-4f0673466e62/manager/2.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.238895 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rlr55_f68377a4-dee0-404b-988a-4f0673466e62/manager/1.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.363385 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-9lvtk_33ba2b4e-6239-43c0-a694-6495b7ae2ba3/kube-rbac-proxy/0.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.374616 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-9lvtk_33ba2b4e-6239-43c0-a694-6495b7ae2ba3/manager/3.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.395792 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-9lvtk_33ba2b4e-6239-43c0-a694-6495b7ae2ba3/manager/2.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.540516 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg_6b5541da-9198-4f49-998b-1bfd982089d1/manager/1.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.566810 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg_6b5541da-9198-4f49-998b-1bfd982089d1/manager/0.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.590858 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6ffd77ccd-ztlgg_6b5541da-9198-4f49-998b-1bfd982089d1/kube-rbac-proxy/0.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.743575 4871 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-56868586f6-2v8hx_6d7ff4ed-503b-4184-8633-47598150b7f0/manager/2.log" Nov 26 07:02:50 crc kubenswrapper[4871]: I1126 07:02:50.887430 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5675dd9766-bp9px_d78961c7-c9ff-4550-bf75-add0fcef53fe/operator/1.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.067209 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5675dd9766-bp9px_d78961c7-c9ff-4550-bf75-add0fcef53fe/operator/0.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.097178 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-56868586f6-2v8hx_6d7ff4ed-503b-4184-8633-47598150b7f0/manager/3.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.101661 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-pwvh2_87895915-b98b-423d-b00c-9dd92656f1a8/registry-server/0.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.180808 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-shgb6_6ccd73b2-dbfd-4cd6-845c-a61af4f20f96/kube-rbac-proxy/0.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.286721 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-shgb6_6ccd73b2-dbfd-4cd6-845c-a61af4f20f96/manager/3.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.295674 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-shgb6_6ccd73b2-dbfd-4cd6-845c-a61af4f20f96/manager/2.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.335118 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-dxbwn_1cc75505-b927-488b-8a16-4fda9a1c2dca/kube-rbac-proxy/0.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.381283 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-dxbwn_1cc75505-b927-488b-8a16-4fda9a1c2dca/manager/3.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.491874 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-dxbwn_1cc75505-b927-488b-8a16-4fda9a1c2dca/manager/2.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.501768 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-6c6pc_0b2406e7-8b16-45e1-b726-645d22421af5/operator/3.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.545095 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-6c6pc_0b2406e7-8b16-45e1-b726-645d22421af5/operator/2.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.667754 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-skx5k_4b0778b1-b974-4ce6-bac4-59920ab67dd7/manager/3.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.678762 4871 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-skx5k_4b0778b1-b974-4ce6-bac4-59920ab67dd7/kube-rbac-proxy/0.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.688691 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-skx5k_4b0778b1-b974-4ce6-bac4-59920ab67dd7/manager/2.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.817054 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-6kccm_974fe30e-68b5-42bb-9940-a2000ab315f8/kube-rbac-proxy/0.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.885396 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-6kccm_974fe30e-68b5-42bb-9940-a2000ab315f8/manager/2.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.913345 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-6kccm_974fe30e-68b5-42bb-9940-a2000ab315f8/manager/3.log" Nov 26 07:02:51 crc kubenswrapper[4871]: I1126 07:02:51.950744 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-jj87z_1b4fb0bb-1050-4bda-acf4-c3efafc79e4a/kube-rbac-proxy/0.log" Nov 26 07:02:52 crc kubenswrapper[4871]: I1126 07:02:52.031742 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-jj87z_1b4fb0bb-1050-4bda-acf4-c3efafc79e4a/manager/1.log" Nov 26 07:02:52 crc kubenswrapper[4871]: I1126 07:02:52.047768 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-jj87z_1b4fb0bb-1050-4bda-acf4-c3efafc79e4a/manager/0.log" Nov 26 07:02:52 crc kubenswrapper[4871]: I1126 07:02:52.152686 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-v95x7_8d32351e-c0cc-4c2a-89b2-a79b61cf632e/kube-rbac-proxy/0.log" Nov 26 07:02:52 crc kubenswrapper[4871]: I1126 07:02:52.176109 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-v95x7_8d32351e-c0cc-4c2a-89b2-a79b61cf632e/manager/3.log" Nov 26 07:02:52 crc kubenswrapper[4871]: I1126 07:02:52.215840 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-v95x7_8d32351e-c0cc-4c2a-89b2-a79b61cf632e/manager/2.log" Nov 26 07:03:01 crc kubenswrapper[4871]: I1126 07:03:01.506934 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:03:01 crc kubenswrapper[4871]: E1126 07:03:01.507803 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:03:09 crc kubenswrapper[4871]: I1126 07:03:09.675300 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-2pb6b_dd4302fa-1a28-4718-b14c-f85e45519916/control-plane-machine-set-operator/0.log" Nov 26 07:03:09 crc kubenswrapper[4871]: I1126 07:03:09.771423 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wg5vb_4edc5fd4-3610-4fa0-bf22-5ee6a41f6589/kube-rbac-proxy/0.log" Nov 26 07:03:09 crc kubenswrapper[4871]: I1126 07:03:09.818965 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-wg5vb_4edc5fd4-3610-4fa0-bf22-5ee6a41f6589/machine-api-operator/0.log" Nov 26 07:03:12 crc kubenswrapper[4871]: I1126 07:03:12.507240 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:03:12 crc kubenswrapper[4871]: E1126 07:03:12.508184 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:03:22 crc kubenswrapper[4871]: I1126 07:03:22.062070 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-gkprb_c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc/cert-manager-controller/1.log" Nov 26 07:03:22 crc kubenswrapper[4871]: I1126 07:03:22.141040 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-gkprb_c2ecf354-32f2-4cb3-80f1-e964ce5a3bdc/cert-manager-controller/0.log" Nov 26 07:03:22 crc kubenswrapper[4871]: I1126 07:03:22.262322 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-v7fsr_250180c0-d204-44e0-83b1-64259ea3bd68/cert-manager-cainjector/1.log" Nov 26 07:03:22 crc kubenswrapper[4871]: I1126 07:03:22.287917 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-v7fsr_250180c0-d204-44e0-83b1-64259ea3bd68/cert-manager-cainjector/0.log" Nov 26 07:03:22 crc kubenswrapper[4871]: I1126 07:03:22.458123 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-2v767_6a758ba2-2916-440d-9a57-149111e0ff4c/cert-manager-webhook/0.log" Nov 26 07:03:25 crc kubenswrapper[4871]: I1126 07:03:25.507415 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:03:25 crc kubenswrapper[4871]: E1126 07:03:25.507916 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:03:34 crc kubenswrapper[4871]: I1126 07:03:34.673007 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-kgf8h_bacf9337-da95-4df5-9f49-a9e6c46ac060/nmstate-console-plugin/0.log" Nov 26 07:03:34 crc kubenswrapper[4871]: I1126 07:03:34.848122 4871 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-w8pzw_f24be1df-be1a-4389-a3d5-7842b91f18b4/nmstate-handler/0.log" Nov 26 07:03:34 crc kubenswrapper[4871]: I1126 07:03:34.901133 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-z92tv_d6cb9226-08bd-44d7-97b7-ac75848ef5bd/kube-rbac-proxy/0.log" Nov 26 07:03:34 crc kubenswrapper[4871]: I1126 07:03:34.956089 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-z92tv_d6cb9226-08bd-44d7-97b7-ac75848ef5bd/nmstate-metrics/0.log" Nov 26 07:03:35 crc kubenswrapper[4871]: I1126 07:03:35.062742 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-xm9jn_4b9d5c2d-8d95-4b86-86e4-6e425a8c6814/nmstate-operator/0.log" Nov 26 07:03:35 crc kubenswrapper[4871]: I1126 07:03:35.187673 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-ngv76_85b6422a-f943-4ced-8695-3d7f52f5f145/nmstate-webhook/0.log" Nov 26 07:03:38 crc kubenswrapper[4871]: I1126 07:03:38.507403 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:03:38 crc kubenswrapper[4871]: E1126 07:03:38.508029 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:03:49 crc kubenswrapper[4871]: I1126 07:03:49.468481 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-n9ldw_df243ac7-b567-4159-8103-103df0831280/kube-rbac-proxy/0.log" Nov 26 07:03:49 crc kubenswrapper[4871]: I1126 07:03:49.637338 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-n9ldw_df243ac7-b567-4159-8103-103df0831280/controller/0.log" Nov 26 07:03:49 crc kubenswrapper[4871]: I1126 07:03:49.686871 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-frr-files/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.015112 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-reloader/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.020308 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-reloader/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.067031 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-frr-files/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.083694 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-metrics/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.224615 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-frr-files/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: 
I1126 07:03:50.245924 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-metrics/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.257677 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-reloader/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.302963 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-metrics/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.478357 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-metrics/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.493212 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/controller/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.548170 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-frr-files/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.551785 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/cp-reloader/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.680661 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/frr-metrics/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.752163 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/kube-rbac-proxy/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.797964 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/kube-rbac-proxy-frr/0.log" Nov 26 07:03:50 crc kubenswrapper[4871]: I1126 07:03:50.944162 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/reloader/0.log" Nov 26 07:03:51 crc kubenswrapper[4871]: I1126 07:03:51.043876 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-nnxbg_91a86765-1b7c-445b-8930-dc06e96fc752/frr-k8s-webhook-server/0.log" Nov 26 07:03:51 crc kubenswrapper[4871]: I1126 07:03:51.267400 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-645b9949f7-48k8g_0f2d5628-2ad3-400c-bc77-b0251683a83a/manager/3.log" Nov 26 07:03:51 crc kubenswrapper[4871]: I1126 07:03:51.343713 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-645b9949f7-48k8g_0f2d5628-2ad3-400c-bc77-b0251683a83a/manager/2.log" Nov 26 07:03:51 crc kubenswrapper[4871]: I1126 07:03:51.525135 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-69c6746fd5-pkb65_35333648-4e74-4c66-803e-091d7d5673ca/webhook-server/0.log" Nov 26 07:03:51 crc kubenswrapper[4871]: I1126 07:03:51.671234 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2nt4b_e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b/kube-rbac-proxy/0.log" Nov 26 07:03:52 crc kubenswrapper[4871]: I1126 
Nov 26 07:03:52 crc kubenswrapper[4871]: I1126 07:03:52.363578 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2nt4b_e5a62ab6-dbcc-4ea0-b2fa-da2174bb0d8b/speaker/0.log"
Nov 26 07:03:52 crc kubenswrapper[4871]: I1126 07:03:52.505311 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-dzkmx_1e670c64-e309-46e6-bdb8-797f85aee3c9/frr/0.log"
Nov 26 07:03:52 crc kubenswrapper[4871]: I1126 07:03:52.519715 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947"
Nov 26 07:03:52 crc kubenswrapper[4871]: E1126 07:03:52.520028 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 07:04:03 crc kubenswrapper[4871]: I1126 07:04:03.508234 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947"
Nov 26 07:04:03 crc kubenswrapper[4871]: E1126 07:04:03.509762 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c"
Nov 26 07:04:05 crc kubenswrapper[4871]: I1126 07:04:05.446893 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/util/0.log"
Nov 26 07:04:05 crc kubenswrapper[4871]: I1126 07:04:05.629756 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/pull/0.log"
Nov 26 07:04:05 crc kubenswrapper[4871]: I1126 07:04:05.643851 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/pull/0.log"
Nov 26 07:04:05 crc kubenswrapper[4871]: I1126 07:04:05.647645 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/util/0.log"
Nov 26 07:04:05 crc kubenswrapper[4871]: I1126 07:04:05.801598 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/extract/0.log"
Nov 26 07:04:05 crc kubenswrapper[4871]: I1126 07:04:05.831715 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/pull/0.log"
Nov 26 07:04:05 crc kubenswrapper[4871]: I1126 07:04:05.842348 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772emsprr_b8b55d68-fcd3-43c4-94fe-344ed7cdb002/util/0.log"
Nov 26 07:04:05 crc kubenswrapper[4871]: I1126 07:04:05.996007 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/util/0.log"
Nov 26 07:04:06 crc kubenswrapper[4871]: I1126 07:04:06.191159 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/pull/0.log"
Nov 26 07:04:06 crc kubenswrapper[4871]: I1126 07:04:06.230225 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/util/0.log"
Nov 26 07:04:06 crc kubenswrapper[4871]: I1126 07:04:06.243535 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/pull/0.log"
Nov 26 07:04:06 crc kubenswrapper[4871]: I1126 07:04:06.429718 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/util/0.log"
Nov 26 07:04:06 crc kubenswrapper[4871]: I1126 07:04:06.440661 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/pull/0.log"
Nov 26 07:04:06 crc kubenswrapper[4871]: I1126 07:04:06.479090 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210qzx2d_0acf5a3d-2727-42dd-a502-e7b8ad27a0a9/extract/0.log"
Nov 26 07:04:06 crc kubenswrapper[4871]: I1126 07:04:06.604209 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-utilities/0.log"
Nov 26 07:04:06 crc kubenswrapper[4871]: I1126 07:04:06.801931 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-content/0.log"
Nov 26 07:04:06 crc kubenswrapper[4871]: I1126 07:04:06.804263 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-content/0.log"
Nov 26 07:04:06 crc kubenswrapper[4871]: I1126 07:04:06.813194 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-utilities/0.log"
Nov 26 07:04:06 crc kubenswrapper[4871]: I1126 07:04:06.972432 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-content/0.log"
Nov 26 07:04:07 crc kubenswrapper[4871]: I1126 07:04:07.006371 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/extract-utilities/0.log"
Nov 26 07:04:07 crc kubenswrapper[4871]: I1126 07:04:07.192373 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-utilities/0.log"
Nov 26 07:04:07 crc kubenswrapper[4871]: I1126 07:04:07.452937 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-utilities/0.log"
Nov 26 07:04:07 crc kubenswrapper[4871]: I1126 07:04:07.650634 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-content/0.log"
Nov 26 07:04:07 crc kubenswrapper[4871]: I1126 07:04:07.680924 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-content/0.log"
Nov 26 07:04:07 crc kubenswrapper[4871]: I1126 07:04:07.817550 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4qdhh_4151ee9a-4d65-4438-bf55-d437df2482d8/registry-server/0.log"
Nov 26 07:04:07 crc kubenswrapper[4871]: I1126 07:04:07.827290 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-content/0.log"
Nov 26 07:04:07 crc kubenswrapper[4871]: I1126 07:04:07.832693 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/extract-utilities/0.log"
Nov 26 07:04:08 crc kubenswrapper[4871]: I1126 07:04:08.125968 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/util/0.log"
Nov 26 07:04:08 crc kubenswrapper[4871]: I1126 07:04:08.322337 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/util/0.log"
Nov 26 07:04:08 crc kubenswrapper[4871]: I1126 07:04:08.360188 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/pull/0.log"
Nov 26 07:04:08 crc kubenswrapper[4871]: I1126 07:04:08.397273 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/pull/0.log"
Nov 26 07:04:08 crc kubenswrapper[4871]: I1126 07:04:08.570572 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/util/0.log"
Nov 26 07:04:08 crc kubenswrapper[4871]: I1126 07:04:08.590778 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/extract/0.log"
Nov 26 07:04:08 crc kubenswrapper[4871]: I1126 07:04:08.602852 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c64k6xk_48fae954-7c94-4755-8e57-c910119b6089/pull/0.log"
Nov 26 07:04:08 crc kubenswrapper[4871]: I1126 07:04:08.753245 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qks66_65ad1a09-cc57-45f2-9a13-2d83b8b8221c/marketplace-operator/1.log"
path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qks66_65ad1a09-cc57-45f2-9a13-2d83b8b8221c/marketplace-operator/0.log" Nov 26 07:04:08 crc kubenswrapper[4871]: I1126 07:04:08.908383 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2wpn8_e6301213-5be0-4241-ba6d-01e1cfc78b78/registry-server/0.log" Nov 26 07:04:09 crc kubenswrapper[4871]: I1126 07:04:09.045204 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-utilities/0.log" Nov 26 07:04:09 crc kubenswrapper[4871]: I1126 07:04:09.183402 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-utilities/0.log" Nov 26 07:04:09 crc kubenswrapper[4871]: I1126 07:04:09.191924 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-content/0.log" Nov 26 07:04:09 crc kubenswrapper[4871]: I1126 07:04:09.233266 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-content/0.log" Nov 26 07:04:09 crc kubenswrapper[4871]: I1126 07:04:09.458093 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-utilities/0.log" Nov 26 07:04:09 crc kubenswrapper[4871]: I1126 07:04:09.473698 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/extract-content/0.log" Nov 26 07:04:09 crc kubenswrapper[4871]: I1126 07:04:09.665750 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fqzs2_9879edf7-a11e-49fa-a1ad-b8057cc59072/registry-server/0.log" Nov 26 07:04:09 crc kubenswrapper[4871]: I1126 07:04:09.673583 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-utilities/0.log" Nov 26 07:04:09 crc kubenswrapper[4871]: I1126 07:04:09.811588 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-content/0.log" Nov 26 07:04:09 crc kubenswrapper[4871]: I1126 07:04:09.827878 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-utilities/0.log" Nov 26 07:04:09 crc kubenswrapper[4871]: I1126 07:04:09.830393 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-content/0.log" Nov 26 07:04:10 crc kubenswrapper[4871]: I1126 07:04:10.037576 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-utilities/0.log" Nov 26 07:04:10 crc kubenswrapper[4871]: I1126 07:04:10.042576 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/extract-content/0.log" Nov 26 07:04:10 crc kubenswrapper[4871]: I1126 07:04:10.747021 4871 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-xb2lw_5113372b-1125-4d32-8af6-160defd5579a/registry-server/0.log" Nov 26 07:04:17 crc kubenswrapper[4871]: I1126 07:04:17.507638 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:04:17 crc kubenswrapper[4871]: E1126 07:04:17.510096 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:04:22 crc kubenswrapper[4871]: I1126 07:04:22.822103 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-bmfw7_b9220e8d-267e-4462-b6ea-094a0f724eb3/prometheus-operator/0.log" Nov 26 07:04:22 crc kubenswrapper[4871]: I1126 07:04:22.967151 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58b986bd6b-b4hdj_995a4906-508d-4285-b40c-5b14fd9d7b98/prometheus-operator-admission-webhook/0.log" Nov 26 07:04:23 crc kubenswrapper[4871]: I1126 07:04:23.024716 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-58b986bd6b-x7qx5_3cd1c1e8-5430-4209-a0e2-3176d0ebb70a/prometheus-operator-admission-webhook/0.log" Nov 26 07:04:23 crc kubenswrapper[4871]: I1126 07:04:23.178692 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-l8rk7_2e5f535a-ead4-47e3-a477-20cf74b0828a/operator/0.log" Nov 26 07:04:23 crc kubenswrapper[4871]: I1126 07:04:23.228982 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-9f8vw_b0a308b8-6586-4d48-b431-ce0c6f46a23e/perses-operator/0.log" Nov 26 07:04:28 crc kubenswrapper[4871]: I1126 07:04:28.507910 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:04:28 crc kubenswrapper[4871]: E1126 07:04:28.508713 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:04:42 crc kubenswrapper[4871]: E1126 07:04:42.761165 4871 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.44:34252->38.102.83.44:38809: write tcp 38.102.83.44:34252->38.102.83.44:38809: write: broken pipe Nov 26 07:04:43 crc kubenswrapper[4871]: I1126 07:04:43.507341 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:04:43 crc kubenswrapper[4871]: E1126 07:04:43.508109 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:04:56 crc kubenswrapper[4871]: I1126 07:04:56.508235 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:04:56 crc kubenswrapper[4871]: E1126 07:04:56.509037 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:05:09 crc kubenswrapper[4871]: I1126 07:05:09.506994 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:05:09 crc kubenswrapper[4871]: E1126 07:05:09.507812 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:05:22 crc kubenswrapper[4871]: I1126 07:05:22.513198 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:05:22 crc kubenswrapper[4871]: E1126 07:05:22.514079 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:05:33 crc kubenswrapper[4871]: I1126 07:05:33.508946 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:05:33 crc kubenswrapper[4871]: E1126 07:05:33.510282 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:05:46 crc kubenswrapper[4871]: I1126 07:05:46.508253 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:05:46 crc kubenswrapper[4871]: E1126 07:05:46.509771 4871 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-zmlz2_openshift-machine-config-operator(3cd6a6d4-9b5f-4d27-a839-d37960bff02c)\"" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" 
podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" Nov 26 07:06:01 crc kubenswrapper[4871]: I1126 07:06:01.508139 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:06:02 crc kubenswrapper[4871]: I1126 07:06:02.278572 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"c26f5d21da3b5a3de8aa6b5e6309ead68ccbea20df8b610cb4efe9d0b496a3e7"} Nov 26 07:06:24 crc kubenswrapper[4871]: I1126 07:06:24.153154 4871 generic.go:334] "Generic (PLEG): container finished" podID="95398267-c3f9-4b92-876c-9e5c594e63cb" containerID="26b782acd8d954890425c38deb57daa79f731e5d2dc03fdf80c5c5d3a87b91a9" exitCode=0 Nov 26 07:06:24 crc kubenswrapper[4871]: I1126 07:06:24.153244 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vsr79/must-gather-rtl88" event={"ID":"95398267-c3f9-4b92-876c-9e5c594e63cb","Type":"ContainerDied","Data":"26b782acd8d954890425c38deb57daa79f731e5d2dc03fdf80c5c5d3a87b91a9"} Nov 26 07:06:24 crc kubenswrapper[4871]: I1126 07:06:24.154110 4871 scope.go:117] "RemoveContainer" containerID="26b782acd8d954890425c38deb57daa79f731e5d2dc03fdf80c5c5d3a87b91a9" Nov 26 07:06:24 crc kubenswrapper[4871]: I1126 07:06:24.667216 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vsr79_must-gather-rtl88_95398267-c3f9-4b92-876c-9e5c594e63cb/gather/0.log" Nov 26 07:06:37 crc kubenswrapper[4871]: I1126 07:06:37.129412 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vsr79/must-gather-rtl88"] Nov 26 07:06:37 crc kubenswrapper[4871]: I1126 07:06:37.130243 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-vsr79/must-gather-rtl88" podUID="95398267-c3f9-4b92-876c-9e5c594e63cb" containerName="copy" containerID="cri-o://8d1c6fac7c9a6ca2bfb3bcf189ec7bcdb89901fde8d19ac7707314584b609ba7" gracePeriod=2 Nov 26 07:06:37 crc kubenswrapper[4871]: I1126 07:06:37.142842 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vsr79/must-gather-rtl88"] Nov 26 07:06:37 crc kubenswrapper[4871]: I1126 07:06:37.310770 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vsr79_must-gather-rtl88_95398267-c3f9-4b92-876c-9e5c594e63cb/copy/0.log" Nov 26 07:06:37 crc kubenswrapper[4871]: I1126 07:06:37.311043 4871 generic.go:334] "Generic (PLEG): container finished" podID="95398267-c3f9-4b92-876c-9e5c594e63cb" containerID="8d1c6fac7c9a6ca2bfb3bcf189ec7bcdb89901fde8d19ac7707314584b609ba7" exitCode=143 Nov 26 07:06:37 crc kubenswrapper[4871]: I1126 07:06:37.663400 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vsr79_must-gather-rtl88_95398267-c3f9-4b92-876c-9e5c594e63cb/copy/0.log" Nov 26 07:06:37 crc kubenswrapper[4871]: I1126 07:06:37.664109 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vsr79/must-gather-rtl88" Nov 26 07:06:37 crc kubenswrapper[4871]: I1126 07:06:37.836607 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/95398267-c3f9-4b92-876c-9e5c594e63cb-must-gather-output\") pod \"95398267-c3f9-4b92-876c-9e5c594e63cb\" (UID: \"95398267-c3f9-4b92-876c-9e5c594e63cb\") " Nov 26 07:06:37 crc kubenswrapper[4871]: I1126 07:06:37.837023 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8nx6\" (UniqueName: \"kubernetes.io/projected/95398267-c3f9-4b92-876c-9e5c594e63cb-kube-api-access-j8nx6\") pod \"95398267-c3f9-4b92-876c-9e5c594e63cb\" (UID: \"95398267-c3f9-4b92-876c-9e5c594e63cb\") " Nov 26 07:06:37 crc kubenswrapper[4871]: I1126 07:06:37.847837 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95398267-c3f9-4b92-876c-9e5c594e63cb-kube-api-access-j8nx6" (OuterVolumeSpecName: "kube-api-access-j8nx6") pod "95398267-c3f9-4b92-876c-9e5c594e63cb" (UID: "95398267-c3f9-4b92-876c-9e5c594e63cb"). InnerVolumeSpecName "kube-api-access-j8nx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:06:37 crc kubenswrapper[4871]: I1126 07:06:37.939279 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8nx6\" (UniqueName: \"kubernetes.io/projected/95398267-c3f9-4b92-876c-9e5c594e63cb-kube-api-access-j8nx6\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:38 crc kubenswrapper[4871]: I1126 07:06:38.045929 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95398267-c3f9-4b92-876c-9e5c594e63cb-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "95398267-c3f9-4b92-876c-9e5c594e63cb" (UID: "95398267-c3f9-4b92-876c-9e5c594e63cb"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:06:38 crc kubenswrapper[4871]: I1126 07:06:38.144256 4871 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/95398267-c3f9-4b92-876c-9e5c594e63cb-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 26 07:06:38 crc kubenswrapper[4871]: I1126 07:06:38.322591 4871 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vsr79_must-gather-rtl88_95398267-c3f9-4b92-876c-9e5c594e63cb/copy/0.log" Nov 26 07:06:38 crc kubenswrapper[4871]: I1126 07:06:38.322992 4871 scope.go:117] "RemoveContainer" containerID="8d1c6fac7c9a6ca2bfb3bcf189ec7bcdb89901fde8d19ac7707314584b609ba7" Nov 26 07:06:38 crc kubenswrapper[4871]: I1126 07:06:38.323140 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vsr79/must-gather-rtl88" Nov 26 07:06:38 crc kubenswrapper[4871]: I1126 07:06:38.344445 4871 scope.go:117] "RemoveContainer" containerID="26b782acd8d954890425c38deb57daa79f731e5d2dc03fdf80c5c5d3a87b91a9" Nov 26 07:06:38 crc kubenswrapper[4871]: I1126 07:06:38.524151 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95398267-c3f9-4b92-876c-9e5c594e63cb" path="/var/lib/kubelet/pods/95398267-c3f9-4b92-876c-9e5c594e63cb/volumes" Nov 26 07:08:23 crc kubenswrapper[4871]: I1126 07:08:23.615387 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:08:23 crc kubenswrapper[4871]: I1126 07:08:23.616003 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:08:25 crc kubenswrapper[4871]: I1126 07:08:25.312006 4871 scope.go:117] "RemoveContainer" containerID="2fab019b1173d4acae2800f4dc5544604cbef0a034705320e202a8fa3a3fac4a" Nov 26 07:08:53 crc kubenswrapper[4871]: I1126 07:08:53.614662 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:08:53 crc kubenswrapper[4871]: I1126 07:08:53.615168 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.676500 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zc6jr"] Nov 26 07:09:08 crc kubenswrapper[4871]: E1126 07:09:08.677577 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95398267-c3f9-4b92-876c-9e5c594e63cb" containerName="copy" Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.677592 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="95398267-c3f9-4b92-876c-9e5c594e63cb" containerName="copy" Nov 26 07:09:08 crc kubenswrapper[4871]: E1126 07:09:08.677604 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9153a6a-99bb-462b-99cd-da4d0d1f00e9" containerName="container-00" Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.677610 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9153a6a-99bb-462b-99cd-da4d0d1f00e9" containerName="container-00" Nov 26 07:09:08 crc kubenswrapper[4871]: E1126 07:09:08.677648 4871 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95398267-c3f9-4b92-876c-9e5c594e63cb" containerName="gather" Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.677654 4871 state_mem.go:107] "Deleted CPUSet assignment" podUID="95398267-c3f9-4b92-876c-9e5c594e63cb" containerName="gather" Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.677871 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9153a6a-99bb-462b-99cd-da4d0d1f00e9" containerName="container-00"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.677881 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="95398267-c3f9-4b92-876c-9e5c594e63cb" containerName="copy"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.677904 4871 memory_manager.go:354] "RemoveStaleState removing state" podUID="95398267-c3f9-4b92-876c-9e5c594e63cb" containerName="gather"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.679480 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zc6jr"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.693116 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zc6jr"]
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.839107 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-catalog-content\") pod \"certified-operators-zc6jr\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " pod="openshift-marketplace/certified-operators-zc6jr"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.839207 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knpls\" (UniqueName: \"kubernetes.io/projected/599076ef-73e7-4be5-b1b3-47eaf43ee728-kube-api-access-knpls\") pod \"certified-operators-zc6jr\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " pod="openshift-marketplace/certified-operators-zc6jr"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.839552 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-utilities\") pod \"certified-operators-zc6jr\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " pod="openshift-marketplace/certified-operators-zc6jr"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.942277 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-catalog-content\") pod \"certified-operators-zc6jr\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " pod="openshift-marketplace/certified-operators-zc6jr"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.942410 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knpls\" (UniqueName: \"kubernetes.io/projected/599076ef-73e7-4be5-b1b3-47eaf43ee728-kube-api-access-knpls\") pod \"certified-operators-zc6jr\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " pod="openshift-marketplace/certified-operators-zc6jr"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.942513 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-utilities\") pod \"certified-operators-zc6jr\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " pod="openshift-marketplace/certified-operators-zc6jr"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.943274 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-utilities\") pod \"certified-operators-zc6jr\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " pod="openshift-marketplace/certified-operators-zc6jr"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.943267 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-catalog-content\") pod \"certified-operators-zc6jr\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " pod="openshift-marketplace/certified-operators-zc6jr"
Nov 26 07:09:08 crc kubenswrapper[4871]: I1126 07:09:08.978969 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knpls\" (UniqueName: \"kubernetes.io/projected/599076ef-73e7-4be5-b1b3-47eaf43ee728-kube-api-access-knpls\") pod \"certified-operators-zc6jr\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " pod="openshift-marketplace/certified-operators-zc6jr"
Nov 26 07:09:09 crc kubenswrapper[4871]: I1126 07:09:09.011413 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zc6jr"
Nov 26 07:09:09 crc kubenswrapper[4871]: I1126 07:09:09.541447 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zc6jr"]
Nov 26 07:09:09 crc kubenswrapper[4871]: I1126 07:09:09.957845 4871 generic.go:334] "Generic (PLEG): container finished" podID="599076ef-73e7-4be5-b1b3-47eaf43ee728" containerID="917f3ba360893f7ae0f28cd29a09651f86e2329980862ab7045a299f4c75ebce" exitCode=0
Nov 26 07:09:09 crc kubenswrapper[4871]: I1126 07:09:09.957948 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zc6jr" event={"ID":"599076ef-73e7-4be5-b1b3-47eaf43ee728","Type":"ContainerDied","Data":"917f3ba360893f7ae0f28cd29a09651f86e2329980862ab7045a299f4c75ebce"}
Nov 26 07:09:09 crc kubenswrapper[4871]: I1126 07:09:09.958237 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zc6jr" event={"ID":"599076ef-73e7-4be5-b1b3-47eaf43ee728","Type":"ContainerStarted","Data":"282fdaa73e6961ef999ec69681ed479d0670d7fb5ee5b7acdf6cda80980bcfb7"}
Nov 26 07:09:09 crc kubenswrapper[4871]: I1126 07:09:09.960369 4871 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 26 07:09:10 crc kubenswrapper[4871]: I1126 07:09:10.971329 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zc6jr" event={"ID":"599076ef-73e7-4be5-b1b3-47eaf43ee728","Type":"ContainerStarted","Data":"caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc"}
Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.062434 4871 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x9cm5"]
Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.064672 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x9cm5"
Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.074742 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x9cm5"]
Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.202653 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hhl2\" (UniqueName: \"kubernetes.io/projected/20cc6f15-506c-4137-b7ce-0c93af7e8472-kube-api-access-6hhl2\") pod \"community-operators-x9cm5\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " pod="openshift-marketplace/community-operators-x9cm5"
Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.202704 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-utilities\") pod \"community-operators-x9cm5\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " pod="openshift-marketplace/community-operators-x9cm5"
Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.203052 4871 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-catalog-content\") pod \"community-operators-x9cm5\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " pod="openshift-marketplace/community-operators-x9cm5"
Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.304791 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-catalog-content\") pod \"community-operators-x9cm5\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " pod="openshift-marketplace/community-operators-x9cm5"
Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.304890 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hhl2\" (UniqueName: \"kubernetes.io/projected/20cc6f15-506c-4137-b7ce-0c93af7e8472-kube-api-access-6hhl2\") pod \"community-operators-x9cm5\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " pod="openshift-marketplace/community-operators-x9cm5"
Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.304911 4871 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-utilities\") pod \"community-operators-x9cm5\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " pod="openshift-marketplace/community-operators-x9cm5"
Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.305917 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-catalog-content\") pod \"community-operators-x9cm5\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " pod="openshift-marketplace/community-operators-x9cm5"
Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.305952 4871 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-utilities\") pod \"community-operators-x9cm5\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " pod="openshift-marketplace/community-operators-x9cm5"
"MountVolume.SetUp succeeded for volume \"kube-api-access-6hhl2\" (UniqueName: \"kubernetes.io/projected/20cc6f15-506c-4137-b7ce-0c93af7e8472-kube-api-access-6hhl2\") pod \"community-operators-x9cm5\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " pod="openshift-marketplace/community-operators-x9cm5" Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.393796 4871 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x9cm5" Nov 26 07:09:11 crc kubenswrapper[4871]: I1126 07:09:11.985914 4871 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x9cm5"] Nov 26 07:09:12 crc kubenswrapper[4871]: I1126 07:09:12.988948 4871 generic.go:334] "Generic (PLEG): container finished" podID="20cc6f15-506c-4137-b7ce-0c93af7e8472" containerID="234709734ecf7482cf81eab7739a789e8db62906d276c78d36995573e42e7ae5" exitCode=0 Nov 26 07:09:12 crc kubenswrapper[4871]: I1126 07:09:12.989054 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9cm5" event={"ID":"20cc6f15-506c-4137-b7ce-0c93af7e8472","Type":"ContainerDied","Data":"234709734ecf7482cf81eab7739a789e8db62906d276c78d36995573e42e7ae5"} Nov 26 07:09:12 crc kubenswrapper[4871]: I1126 07:09:12.989295 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9cm5" event={"ID":"20cc6f15-506c-4137-b7ce-0c93af7e8472","Type":"ContainerStarted","Data":"85b7665aa5933eac7d5335226654279e0bcd8b53ed3dfa6c8e33ffd75ab5cd78"} Nov 26 07:09:12 crc kubenswrapper[4871]: I1126 07:09:12.991124 4871 generic.go:334] "Generic (PLEG): container finished" podID="599076ef-73e7-4be5-b1b3-47eaf43ee728" containerID="caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc" exitCode=0 Nov 26 07:09:12 crc kubenswrapper[4871]: I1126 07:09:12.991164 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zc6jr" event={"ID":"599076ef-73e7-4be5-b1b3-47eaf43ee728","Type":"ContainerDied","Data":"caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc"} Nov 26 07:09:15 crc kubenswrapper[4871]: I1126 07:09:15.014831 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zc6jr" event={"ID":"599076ef-73e7-4be5-b1b3-47eaf43ee728","Type":"ContainerStarted","Data":"667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6"} Nov 26 07:09:15 crc kubenswrapper[4871]: I1126 07:09:15.016643 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9cm5" event={"ID":"20cc6f15-506c-4137-b7ce-0c93af7e8472","Type":"ContainerStarted","Data":"0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1"} Nov 26 07:09:15 crc kubenswrapper[4871]: I1126 07:09:15.043899 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zc6jr" podStartSLOduration=3.171122647 podStartE2EDuration="7.043862046s" podCreationTimestamp="2025-11-26 07:09:08 +0000 UTC" firstStartedPulling="2025-11-26 07:09:09.960053091 +0000 UTC m=+6208.143104677" lastFinishedPulling="2025-11-26 07:09:13.83279248 +0000 UTC m=+6212.015844076" observedRunningTime="2025-11-26 07:09:15.038913193 +0000 UTC m=+6213.221964799" watchObservedRunningTime="2025-11-26 07:09:15.043862046 +0000 UTC m=+6213.226913642" Nov 26 07:09:19 crc kubenswrapper[4871]: I1126 07:09:19.013039 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-zc6jr" Nov 26 07:09:19 crc kubenswrapper[4871]: I1126 07:09:19.013735 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zc6jr" Nov 26 07:09:19 crc kubenswrapper[4871]: I1126 07:09:19.082640 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zc6jr" Nov 26 07:09:19 crc kubenswrapper[4871]: I1126 07:09:19.136805 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zc6jr" Nov 26 07:09:20 crc kubenswrapper[4871]: I1126 07:09:20.072943 4871 generic.go:334] "Generic (PLEG): container finished" podID="20cc6f15-506c-4137-b7ce-0c93af7e8472" containerID="0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1" exitCode=0 Nov 26 07:09:20 crc kubenswrapper[4871]: I1126 07:09:20.073633 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9cm5" event={"ID":"20cc6f15-506c-4137-b7ce-0c93af7e8472","Type":"ContainerDied","Data":"0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1"} Nov 26 07:09:20 crc kubenswrapper[4871]: I1126 07:09:20.258761 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zc6jr"] Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.084286 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9cm5" event={"ID":"20cc6f15-506c-4137-b7ce-0c93af7e8472","Type":"ContainerStarted","Data":"4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f"} Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.085514 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zc6jr" podUID="599076ef-73e7-4be5-b1b3-47eaf43ee728" containerName="registry-server" containerID="cri-o://667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6" gracePeriod=2 Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.111302 4871 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x9cm5" podStartSLOduration=2.443555906 podStartE2EDuration="10.111283402s" podCreationTimestamp="2025-11-26 07:09:11 +0000 UTC" firstStartedPulling="2025-11-26 07:09:12.991086371 +0000 UTC m=+6211.174137957" lastFinishedPulling="2025-11-26 07:09:20.658813847 +0000 UTC m=+6218.841865453" observedRunningTime="2025-11-26 07:09:21.100574136 +0000 UTC m=+6219.283625722" watchObservedRunningTime="2025-11-26 07:09:21.111283402 +0000 UTC m=+6219.294334988" Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.394642 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x9cm5" Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.394690 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x9cm5" Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.610719 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zc6jr" Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.747795 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-utilities\") pod \"599076ef-73e7-4be5-b1b3-47eaf43ee728\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.748179 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-catalog-content\") pod \"599076ef-73e7-4be5-b1b3-47eaf43ee728\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.748231 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knpls\" (UniqueName: \"kubernetes.io/projected/599076ef-73e7-4be5-b1b3-47eaf43ee728-kube-api-access-knpls\") pod \"599076ef-73e7-4be5-b1b3-47eaf43ee728\" (UID: \"599076ef-73e7-4be5-b1b3-47eaf43ee728\") " Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.750476 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-utilities" (OuterVolumeSpecName: "utilities") pod "599076ef-73e7-4be5-b1b3-47eaf43ee728" (UID: "599076ef-73e7-4be5-b1b3-47eaf43ee728"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.755478 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/599076ef-73e7-4be5-b1b3-47eaf43ee728-kube-api-access-knpls" (OuterVolumeSpecName: "kube-api-access-knpls") pod "599076ef-73e7-4be5-b1b3-47eaf43ee728" (UID: "599076ef-73e7-4be5-b1b3-47eaf43ee728"). InnerVolumeSpecName "kube-api-access-knpls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.800319 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "599076ef-73e7-4be5-b1b3-47eaf43ee728" (UID: "599076ef-73e7-4be5-b1b3-47eaf43ee728"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.850038 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.850075 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/599076ef-73e7-4be5-b1b3-47eaf43ee728-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:21 crc kubenswrapper[4871]: I1126 07:09:21.850087 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knpls\" (UniqueName: \"kubernetes.io/projected/599076ef-73e7-4be5-b1b3-47eaf43ee728-kube-api-access-knpls\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.096188 4871 generic.go:334] "Generic (PLEG): container finished" podID="599076ef-73e7-4be5-b1b3-47eaf43ee728" containerID="667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6" exitCode=0 Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.096247 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zc6jr" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.096247 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zc6jr" event={"ID":"599076ef-73e7-4be5-b1b3-47eaf43ee728","Type":"ContainerDied","Data":"667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6"} Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.096309 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zc6jr" event={"ID":"599076ef-73e7-4be5-b1b3-47eaf43ee728","Type":"ContainerDied","Data":"282fdaa73e6961ef999ec69681ed479d0670d7fb5ee5b7acdf6cda80980bcfb7"} Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.096335 4871 scope.go:117] "RemoveContainer" containerID="667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.133960 4871 scope.go:117] "RemoveContainer" containerID="caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.152450 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zc6jr"] Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.183093 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zc6jr"] Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.190777 4871 scope.go:117] "RemoveContainer" containerID="917f3ba360893f7ae0f28cd29a09651f86e2329980862ab7045a299f4c75ebce" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.237412 4871 scope.go:117] "RemoveContainer" containerID="667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6" Nov 26 07:09:22 crc kubenswrapper[4871]: E1126 07:09:22.237950 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6\": container with ID starting with 667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6 not found: ID does not exist" containerID="667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.237999 
4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6"} err="failed to get container status \"667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6\": rpc error: code = NotFound desc = could not find container \"667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6\": container with ID starting with 667be2ab05df5f57b88b3771157e53dd631cb52428985ce546eecd104d48c9b6 not found: ID does not exist" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.238028 4871 scope.go:117] "RemoveContainer" containerID="caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc" Nov 26 07:09:22 crc kubenswrapper[4871]: E1126 07:09:22.238380 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc\": container with ID starting with caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc not found: ID does not exist" containerID="caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.238447 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc"} err="failed to get container status \"caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc\": rpc error: code = NotFound desc = could not find container \"caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc\": container with ID starting with caf49379e7b05ccfad298a7ca97ddf744ca9634a11f34e2068f7fb8b11be70dc not found: ID does not exist" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.238462 4871 scope.go:117] "RemoveContainer" containerID="917f3ba360893f7ae0f28cd29a09651f86e2329980862ab7045a299f4c75ebce" Nov 26 07:09:22 crc kubenswrapper[4871]: E1126 07:09:22.238842 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"917f3ba360893f7ae0f28cd29a09651f86e2329980862ab7045a299f4c75ebce\": container with ID starting with 917f3ba360893f7ae0f28cd29a09651f86e2329980862ab7045a299f4c75ebce not found: ID does not exist" containerID="917f3ba360893f7ae0f28cd29a09651f86e2329980862ab7045a299f4c75ebce" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.238860 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"917f3ba360893f7ae0f28cd29a09651f86e2329980862ab7045a299f4c75ebce"} err="failed to get container status \"917f3ba360893f7ae0f28cd29a09651f86e2329980862ab7045a299f4c75ebce\": rpc error: code = NotFound desc = could not find container \"917f3ba360893f7ae0f28cd29a09651f86e2329980862ab7045a299f4c75ebce\": container with ID starting with 917f3ba360893f7ae0f28cd29a09651f86e2329980862ab7045a299f4c75ebce not found: ID does not exist" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.517603 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="599076ef-73e7-4be5-b1b3-47eaf43ee728" path="/var/lib/kubelet/pods/599076ef-73e7-4be5-b1b3-47eaf43ee728/volumes" Nov 26 07:09:22 crc kubenswrapper[4871]: I1126 07:09:22.527390 4871 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-x9cm5" podUID="20cc6f15-506c-4137-b7ce-0c93af7e8472" containerName="registry-server" probeResult="failure" output=< Nov 26 07:09:22 crc 
kubenswrapper[4871]: timeout: failed to connect service ":50051" within 1s Nov 26 07:09:22 crc kubenswrapper[4871]: > Nov 26 07:09:23 crc kubenswrapper[4871]: I1126 07:09:23.614966 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:09:23 crc kubenswrapper[4871]: I1126 07:09:23.615055 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 07:09:23 crc kubenswrapper[4871]: I1126 07:09:23.615122 4871 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" Nov 26 07:09:23 crc kubenswrapper[4871]: I1126 07:09:23.616346 4871 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c26f5d21da3b5a3de8aa6b5e6309ead68ccbea20df8b610cb4efe9d0b496a3e7"} pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 07:09:23 crc kubenswrapper[4871]: I1126 07:09:23.616426 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" containerID="cri-o://c26f5d21da3b5a3de8aa6b5e6309ead68ccbea20df8b610cb4efe9d0b496a3e7" gracePeriod=600 Nov 26 07:09:24 crc kubenswrapper[4871]: I1126 07:09:24.121065 4871 generic.go:334] "Generic (PLEG): container finished" podID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerID="c26f5d21da3b5a3de8aa6b5e6309ead68ccbea20df8b610cb4efe9d0b496a3e7" exitCode=0 Nov 26 07:09:24 crc kubenswrapper[4871]: I1126 07:09:24.121131 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerDied","Data":"c26f5d21da3b5a3de8aa6b5e6309ead68ccbea20df8b610cb4efe9d0b496a3e7"} Nov 26 07:09:24 crc kubenswrapper[4871]: I1126 07:09:24.121680 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" event={"ID":"3cd6a6d4-9b5f-4d27-a839-d37960bff02c","Type":"ContainerStarted","Data":"609106f368c69378a182cf7a1d9f524581b85fdd03ddb9733f0d9865efc15cc9"} Nov 26 07:09:24 crc kubenswrapper[4871]: I1126 07:09:24.121712 4871 scope.go:117] "RemoveContainer" containerID="114852fd952ea465231d1b84cc603ae07f65d35fb107d94a8bcd74f78b60b947" Nov 26 07:09:31 crc kubenswrapper[4871]: I1126 07:09:31.454918 4871 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x9cm5" Nov 26 07:09:31 crc kubenswrapper[4871]: I1126 07:09:31.537028 4871 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x9cm5" Nov 26 07:09:31 crc kubenswrapper[4871]: I1126 07:09:31.702606 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/community-operators-x9cm5"] Nov 26 07:09:33 crc kubenswrapper[4871]: I1126 07:09:33.235827 4871 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x9cm5" podUID="20cc6f15-506c-4137-b7ce-0c93af7e8472" containerName="registry-server" containerID="cri-o://4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f" gracePeriod=2 Nov 26 07:09:33 crc kubenswrapper[4871]: I1126 07:09:33.687775 4871 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x9cm5" Nov 26 07:09:33 crc kubenswrapper[4871]: I1126 07:09:33.809010 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hhl2\" (UniqueName: \"kubernetes.io/projected/20cc6f15-506c-4137-b7ce-0c93af7e8472-kube-api-access-6hhl2\") pod \"20cc6f15-506c-4137-b7ce-0c93af7e8472\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " Nov 26 07:09:33 crc kubenswrapper[4871]: I1126 07:09:33.809135 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-utilities\") pod \"20cc6f15-506c-4137-b7ce-0c93af7e8472\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " Nov 26 07:09:33 crc kubenswrapper[4871]: I1126 07:09:33.809292 4871 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-catalog-content\") pod \"20cc6f15-506c-4137-b7ce-0c93af7e8472\" (UID: \"20cc6f15-506c-4137-b7ce-0c93af7e8472\") " Nov 26 07:09:33 crc kubenswrapper[4871]: I1126 07:09:33.810061 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-utilities" (OuterVolumeSpecName: "utilities") pod "20cc6f15-506c-4137-b7ce-0c93af7e8472" (UID: "20cc6f15-506c-4137-b7ce-0c93af7e8472"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:09:33 crc kubenswrapper[4871]: I1126 07:09:33.818741 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20cc6f15-506c-4137-b7ce-0c93af7e8472-kube-api-access-6hhl2" (OuterVolumeSpecName: "kube-api-access-6hhl2") pod "20cc6f15-506c-4137-b7ce-0c93af7e8472" (UID: "20cc6f15-506c-4137-b7ce-0c93af7e8472"). InnerVolumeSpecName "kube-api-access-6hhl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 07:09:33 crc kubenswrapper[4871]: I1126 07:09:33.859754 4871 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "20cc6f15-506c-4137-b7ce-0c93af7e8472" (UID: "20cc6f15-506c-4137-b7ce-0c93af7e8472"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 07:09:33 crc kubenswrapper[4871]: I1126 07:09:33.911418 4871 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:33 crc kubenswrapper[4871]: I1126 07:09:33.911638 4871 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/20cc6f15-506c-4137-b7ce-0c93af7e8472-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:33 crc kubenswrapper[4871]: I1126 07:09:33.911727 4871 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hhl2\" (UniqueName: \"kubernetes.io/projected/20cc6f15-506c-4137-b7ce-0c93af7e8472-kube-api-access-6hhl2\") on node \"crc\" DevicePath \"\"" Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.247350 4871 generic.go:334] "Generic (PLEG): container finished" podID="20cc6f15-506c-4137-b7ce-0c93af7e8472" containerID="4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f" exitCode=0 Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.247582 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9cm5" event={"ID":"20cc6f15-506c-4137-b7ce-0c93af7e8472","Type":"ContainerDied","Data":"4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f"} Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.247683 4871 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x9cm5" event={"ID":"20cc6f15-506c-4137-b7ce-0c93af7e8472","Type":"ContainerDied","Data":"85b7665aa5933eac7d5335226654279e0bcd8b53ed3dfa6c8e33ffd75ab5cd78"} Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.247710 4871 scope.go:117] "RemoveContainer" containerID="4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f" Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.247603 4871 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x9cm5" Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.272427 4871 scope.go:117] "RemoveContainer" containerID="0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1" Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.292578 4871 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x9cm5"] Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.297614 4871 scope.go:117] "RemoveContainer" containerID="234709734ecf7482cf81eab7739a789e8db62906d276c78d36995573e42e7ae5" Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.302500 4871 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x9cm5"] Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.341847 4871 scope.go:117] "RemoveContainer" containerID="4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f" Nov 26 07:09:34 crc kubenswrapper[4871]: E1126 07:09:34.342299 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f\": container with ID starting with 4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f not found: ID does not exist" containerID="4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f" Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.342419 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f"} err="failed to get container status \"4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f\": rpc error: code = NotFound desc = could not find container \"4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f\": container with ID starting with 4a0176f340aa92b79210ec49b9ef0a06e716d3b9e5332e02fd9d6639d24a7f4f not found: ID does not exist" Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.342563 4871 scope.go:117] "RemoveContainer" containerID="0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1" Nov 26 07:09:34 crc kubenswrapper[4871]: E1126 07:09:34.342885 4871 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1\": container with ID starting with 0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1 not found: ID does not exist" containerID="0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1" Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.342915 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1"} err="failed to get container status \"0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1\": rpc error: code = NotFound desc = could not find container \"0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1\": container with ID starting with 0910e972fb612c935ab7cac74dd1dc6ac330b695ddba285c34895ae3d5dbcfe1 not found: ID does not exist" Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.342935 4871 scope.go:117] "RemoveContainer" containerID="234709734ecf7482cf81eab7739a789e8db62906d276c78d36995573e42e7ae5" Nov 26 07:09:34 crc kubenswrapper[4871]: E1126 07:09:34.343154 4871 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"234709734ecf7482cf81eab7739a789e8db62906d276c78d36995573e42e7ae5\": container with ID starting with 234709734ecf7482cf81eab7739a789e8db62906d276c78d36995573e42e7ae5 not found: ID does not exist" containerID="234709734ecf7482cf81eab7739a789e8db62906d276c78d36995573e42e7ae5" Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.343183 4871 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"234709734ecf7482cf81eab7739a789e8db62906d276c78d36995573e42e7ae5"} err="failed to get container status \"234709734ecf7482cf81eab7739a789e8db62906d276c78d36995573e42e7ae5\": rpc error: code = NotFound desc = could not find container \"234709734ecf7482cf81eab7739a789e8db62906d276c78d36995573e42e7ae5\": container with ID starting with 234709734ecf7482cf81eab7739a789e8db62906d276c78d36995573e42e7ae5 not found: ID does not exist" Nov 26 07:09:34 crc kubenswrapper[4871]: I1126 07:09:34.519963 4871 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20cc6f15-506c-4137-b7ce-0c93af7e8472" path="/var/lib/kubelet/pods/20cc6f15-506c-4137-b7ce-0c93af7e8472/volumes" Nov 26 07:11:23 crc kubenswrapper[4871]: I1126 07:11:23.614621 4871 patch_prober.go:28] interesting pod/machine-config-daemon-zmlz2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 07:11:23 crc kubenswrapper[4871]: I1126 07:11:23.615110 4871 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-zmlz2" podUID="3cd6a6d4-9b5f-4d27-a839-d37960bff02c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111524054024443 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111524055017361 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015111507156016507 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015111507156015457 5ustar corecore